text
stringlengths
454
608k
url
stringlengths
17
896
dump
stringclasses
91 values
source
stringclasses
1 value
word_count
int64
101
114k
flesch_reading_ease
float64
50
104
This project displays the latitude and longitude from a GPS sensor on an OLED display using the I2C protocol. In the video below you can see a splash screen is displayed with some information while the GPS is getting a fix on your location. When your location is known the screen updates to show the number of satellites in the fix and your latitude and longitude. Below you can see the I2C OLED and the blox GPS sensor I used. For the software I used probably the best GPS and display libraries. NeoGPS for GPS signal processing and the U8g2 monochrome graphics library for outputting to the OLED display. I used the U8x8 version of U8g2 as I only needed simple text and it’s also faster. These are both easy to install into the Arduino IDE as they are available in the Library Manager. You also need to install the Altsoftserial.h library. To do this open the library manager (Sketch > Include Library > Manage Libraries) and search for and then install each of the following: NeoGPS, Altsoftserial and U8g2. To see the Satellites tracked (TKD), Available (AVL) and the time (TME) at the bottom of the screen a few edits have to be made to one of the configuration files, In Windows,. The wiring is pretty simple as well. UNO -> OLED 5v -> VCC GND -> GND A4 -> SDA A5 -> SCL UNO -> GPS 3.3v -> VCC GND -> GND 8 -> TX 9 -> RX The code is below #include <NMEAGPS.h> #include <U8x8lib.h> //------------------------------------------------------------------------- // The GPSport.h include file tries to choose a default serial port // for the GPS device. If you know which serial port you want to use, // edit the GPSport.h file. #include <GPSport.h> //------------------------------------------------------------ // This object parses received characters // into the gps.fix() data structure static NMEAGPS gps; //------------------------------------------------------------ // Define a set of GPS fix information. 
It will // hold on to the various pieces as they are received from // an RMC sentence. It can be used anywhere in your sketch. static gps_fix fix; // the OLED used U8X8_SSD1306_128X64_NONAME_HW_I2C u8x8(/* reset=*/ U8X8_PIN_NONE); uint32_t timer; bool screencleared = false; void setup() { gpsPort.begin( 9600 ); u8x8.begin(); u8x8.setFont(u8x8_font_chroma48medium8_r); // Start up screen on OLED u8x8.fillDisplay(); delay(1000); for (uint8_t r = 0; r < u8x8.getRows(); r++ ) { u8x8.clearLine(r); delay(100); } delay(100); u8x8.println("Doing"); delay(500); u8x8.println("Some"); delay(500); u8x8.print("Stuff"); timer = millis(); } //---------------------------------------------------------------- // This function gets called about once per second, during the GPS // quiet time. static void doSomeWork() { // timer = millis(); // reset the timer //---------------------------------------------------------------- // This section is run before a fix is made to show sat info (Available, Tracked, Time) // Count how satellites are being received for each GNSS int totalSatellites, trackedSatellites; totalSatellites = gps.sat_count; for (uint8_t i = 0; i < totalSatellites; i++) { if (gps.satellites[i].tracked) { trackedSatellites++; } } u8x8.inverse(); u8x8.drawString(0, 6, "TKD"); u8x8.drawString(5, 6, "AVL"); u8x8.drawString(10, 6, "TME"); u8x8.noInverse(); enum {BufSizeTracked = 3}; //Space for 2 characters + NULL char trackedchar[BufSizeTracked]; snprintf (trackedchar, BufSizeTracked, "%d", trackedSatellites); u8x8.drawString(0, 7, " "); u8x8.drawString(0, 7, trackedchar); enum {BufSizeTotal = 3}; char availchar[BufSizeTotal]; snprintf (availchar, BufSizeTotal, "%d", totalSatellites); u8x8.drawString(5, 7, " "); u8x8.drawString(5, 7, availchar); if (fix.valid.time) { enum {BufSizeTime = 3}; int hour = fix.dateTime.hours + 2; int minute = fix.dateTime.minutes; char hourchar[BufSizeTime]; char minutechar[BufSizeTime]; snprintf (hourchar, BufSizeTime, "%d", hour); snprintf 
(minutechar, BufSizeTime, "%d", minute); if ( hour < 10 ) { snprintf (hourchar, BufSizeTime, "%02d", hour); } if ( minute < 10 ) { snprintf (minutechar, BufSizeTime, "%02d", minute); } u8x8.drawString(10, 7, hourchar); u8x8.drawString(12, 7, ":"); u8x8.drawString(13, 7, minutechar); } //---------------------------------------------------------------- // Once the location is found the top part of the screen is cleared and the fix data is shown if (fix.valid.location) { if (!screencleared) // do once { int r; for ( int r = 0; r < 5; r++ ) { u8x8.clearLine(r); } screencleared = true; } u8x8.inverse(); u8x8.drawString(0, 0, "FIX"); u8x8.drawString(0, 2, "LAT"); u8x8.drawString(0, 4, "LNG"); u8x8.noInverse(); enum {BufSize = 3}; // Space for 2 digits char satchar2[BufSize]; snprintf (satchar2, BufSize, "%d", fix.satellites); u8x8.drawString(4, 0, " "); u8x8.drawString(4, 0, satchar2);); u8x8.drawString(4, 4, longchar); } } // This is the main GPS parsing loop. static void GPSloop() { while (gps.available( gpsPort )) { fix = gps.read(); doSomeWork(); } } void loop() { GPSloop(); // until we get a fix, print a dot every 5 seconds if (millis() - timer > 5000 && !screencleared) { timer = millis(); // reset the timer u8x8.print("."); } } Buy Me A Coffee If you found something useful above please say thanks by buying me a coffee here... 32 Replies to “Arduino Uno with NEO GPS and OLED” hi, mine is getting stuck on the “Doing Some stuff” part. i am not sure whats going on here. Hi, Is the same hardware? You can use this software to test you are receiving data – u-blox.com/en/product/u-center-windows yeah i have the same hardware but its stil not working. do i need the software in order for this to work? im a little confused on what the software doe. thanks The software takes a reading from the GPS and shows you all sorts of information. It’s the best way to check the GPS is working. ok thanks hey i got U-Center, but its not displaying anything. 
do you think my gps is not working? Does it show anything when you have the GPS connected? How are you connecting it? no nothing is shown. i dont think its connecting how should i connect it? You need a USB to Serial converter. I used one of these:. I chose ‘Receiver > Port’ in U-Center and it started working. There are other USB to Serial converters around but they sometimes have driver problems. You’ll see this in device manager in Windows. Hi I followed through every single steps as you described using same hardware but I got an error. exit status 1 ‘class NMEAGPS’ has no member named ‘sat_count’ can you help me solving this plzz You probably just need to do this…. Your suggestion solved my compile issue with gps.sat_count. Thanks C:\Users\#YourUserName#\Documents\Arduino\libraries\NeoGPS\src After this src folder shows empty no idea what to do What happens if you search your PC for NMEAGPS_cfg.h ? Is it Windows 10 you are using? Maybe the files are being hidden. You could try reinstalling the NeoGPS library. Plzz reply something It is important project for me Thanks I can only suggest that you remove the NeoGPS folder from here: C:\Users\YOURUSERNAME\Documents\Arduino\libraries, Then install it again. I’ve posted the NMEAGPS_cfg.h file that works for me here: Lat and Long will not work (stuck at the programs pre-set), and should be removed from the program. Your Time zone is easily corrected by adding or subtracting the ‘+’ number. If anyone responds I’ll gladly give more detail. 
To fix Lat Long… char latchar[10]; // Buffer big enough for 9-character float dtostrf(fix.latitude() + 0, 0, 7, latchar); // Leave room for large numbers // dtostrf(77.848544, 3, 7, latchar); // Leave room for large numbers u8x8.drawString(4, 2, latchar); char longchar[10]; dtostrf(fix.longitude() + 0, 0, 7, longchar); //dtostrf(166.680378, 3, 7, longchar); ______________________________________________ To fix Timezone… enum {BufSizeTime = 3}; int hour = fix.dateTime.hours + 11;// was +2 TIME ZONE CORRECTION SETTING! int minute = fix.dateTime.minutes; Hi, I’ve removed the code dtostrf(77.848544, 3, 7, latchar); because it was setting the variable latchar to a hardcoded value rather than the value from the method fix.latitude to get your actual latitude. The code still worked.. you just have to be in range of enough satelites for fix.valid.location to be satisfied. Good point about the int hour = fix.dateTime.hours + 2;. You have to change this number to get the correct hour for your timezone to appear on the screen. Hi WordBot, cheers for the reply. Only issue I am having now is the fact that when 2400 hours is reached (midnight), the time continues counting past that. ie at actual 0700 hours this morning I got a displayed time of 3000 plus hours. I had to reset my ‘TIME ZONE CORRECTION SETTING!’ (as above) to bring the time back to normal, (seconds were fine). I’m still trying to figure that one out. Any suggestions welcome from you. Other than that, wonderful piece of kit! Thankyou! I’m not sure exactly how to do it but with some maths like hours%24 modulus you can probably fix it. To do it properly you have to allow for daylight savings and all that stuff. Maybe this library will help – Bit too much extra info for this tutorial. It really is a lovely little project, I’ll have to work something out to fix it, then it’ll be perfect. Perfect for my tracking telescope that demands position and time to track stars. Thankyou so much. 
I’m sure it’s a simple task, just gotta find the time! Cheers! Got this setup running with little trouble, however there seems to be severe error in LAT and LNG. I am located at near N33.3, W86.7 but display shows LAT 56.3 and LNG 50.2. TRK = 12 AVL = 12 FIX = 9. Time is correct. Richard Ah.. I had put in some extra numbers so the screen and video examples didn’t show my exact lat and long. I left them in the code though so not very secret! I think this must have been added in a recent update when I pasted in some old code to fix another problem. This section needs to be replaced:); Working great now!! Thanks Hi, I’m using an Arudino Mega, I’m recieving gps-data on serial monitor,but not on my display… how can I change the gpsPort, so that it runs GPSloop() and doSomeWork()??? Thank you for fast reply. I’ve not tried it but apparently… // The GPSport.h include file tries to choose a default serial port // for the GPS device. If you know which serial port you want to use, // edit the GPSport.h file. Options for serial pins: I keep getting this when trying to compile… any ideas? ‘class U8X8_SSD1306_128X64_NONAME_HW_I2C’ has no member named ‘println’ Hi. I’m not sure. Maybe it needs #include {Arduino.h} (replacing the {} with <>) at the top of the sketch? I’ll give it a try. Thank you! I added the statement at the top that you suggested but the issue persisted. I am wondering if the issue is that Im trying to compile this for an Arduino Nano Every instead of an Uno? I figured they would be the same but maybe not? 
When I review the output further, there are actually a LOT of failures: sketch_feb02a:46:8: error: ‘class U8X8_SSD1306_128X64_NONAME_HW_I2C’ has no member named ‘println’ u8x8.println(“Doing”); ^~~~~~~ sketch_feb02a:48:8: error: ‘class U8X8_SSD1306_128X64_NONAME_HW_I2C’ has no member named ‘println’ u8x8.println(“Some”); ^~~~~~~ sketch_feb02a:50:8: error: ‘class U8X8_SSD1306_128X64_NONAME_HW_I2C’ has no member named ‘print’ u8x8.print(“Stuff”); ^~~~~ sketch_feb02a:68:25: error: ‘class NMEAGPS’ has no member named ‘sat_count’ totalSatellites = gps.sat_count; ^~~~~~~~~ sketch_feb02a:70:13: error: ‘class NMEAGPS’ has no member named ‘satellites’; did you mean ‘parseSatellites’? if (gps.satellites[i].tracked) { ^~~~~~~~~~ parseSatellites From what I read the code should be the same. Maybe it’s not finding the libraries. Try removing the include for the screen and see if the error message changes.
https://robotzero.one/arduino-neo-gps-oled/
CC-MAIN-2022-33
refinedweb
1,893
77.33
03 January 2012 09:02 [Source: ICIS news] SINGAPORE (ICIS)--CPC Corp is looking to sell one to two 5,000 tonne lots for January delivery and another 20,000-30,000 tonnes for February and March shipments, the source added. Bids should be submitted by 5 January and should be valid for one day. Prices can either be fixed or on a floating price basis that will be settled against spot CFR quotes. The cargoes will be sold on a FOB basis. The company is issuing the sell tender because its IX consumption in the first quarter of the year is lower than expected, the source said. CPC Corp is typically a nett buyer of IX which is used as feedstock in its three Kaohsiung-based paraxylene (PX) facilities with a combined nameplate capacity of 660,000 tonnes/year. The company will shut its 250,000 tonnes/year No 2 PX unit from mid-February to mid-April for a scheduled turnaround, while its 260,000 tonnes/year No 3 PX facility will be shut for a month-long turnaround in August.
http://www.icis.com/Articles/2012/01/03/9519600/taiwans-cpc-corp-issues-tender-to-sell-up-to-35000-tonnes-of-ix.html
CC-MAIN-2014-52
refinedweb
184
64.14
This is a follow up tutorial to Introduction to Craft CMS. If you're unfamiliar with Craft, I recommend you read part one first. Then, return here and follow our installation and getting started guide. incorporating Twig templates. This won't be intimidating for those used to building WordPress themes. For others, unfortunately,. Last June, its proposal for a Craft CMS StackExchange site was approved in just five days. You can view a showcase of sites running Craft here. In this tutorial, I'll walk you through the process of installing Craft, its demo site, and getting familiar with Craft for building your own site with it. Installing Craft's On The Rocks Demo Site First, let's install Craft's On The Rocks demonstration site with sample theme and content. This site also unlocks all of the premium features for testing. I'm using Mac OS X with MAMP. My local sites run in the ~/Sites directory. Let's clone the Craft demo site from Github: git clone Then, let's set the file permissions for Craft's internal directories: cd ontherocks chmod 777 craft/storage/ chmod 774 craft/config Next, let's update the latest build of Craft. In February 2015, I used version 2.3.2627. However, you can find the very latest at the "Updates" page on the Craft website.. curl -L -o /tmp/Craft.zip unzip /tmp/Craft.zip -d BaseCraft cp -R BaseCraft/craft/app craft/app rm -R BaseCraft && rm /tmp/Craft.zip Then, I used PHPMyAdmin to create the database: Click on the ontherocks database in PHPMyAdmin. Then click Import and choose the file in ~/Sites/ontherocks/SQL/ontherocks.sql: Then edit the database configuration file with the credentials for your local MySQL database: nano ~/Sites/ontherocks/craft/config/db.php /** * Database Configuration * * All of your system's database configuration settings go in here. 
* You can see a list of the default settings in craft/app/config/defaults/db.php */ return array( 'server' => 'localhost', 'user' => 'rocks_user', 'password' => 'yourpassword', 'database' => 'ontherocks', 'tablePrefix' => 'craft', ); Create a new virtual host with the hostname “ontherocks.dev” that points to the public/ folder. Edit your /etc/hosts file to resolve ontherocks.dev to 127.0.0.1, if necessary: ## # Host Database # # localhost is used to configure the loopback interface # when the system is booting. Do not change this entry. ## 127.0.0.1 localhost 127.0.0.1 ontherocks.dev The Craft dashboard is located at. When loaded, you'll see something like this: monkeys...and eyeballs! Then, you'll be asked to update the database: You can log in with the following credentials: Username: admin, Password: password. The Craft Dashboard Here's the dashboard—looks a bit like WordPress, huh? Here's the On the Rocks homepage: Exploring the Happy Lager Demonstration Site The nice folks at Pixel & Tonic gave me a pre-release of their new, free demonstration site, which hopefully will be available by the time you read this—clone it here. The new site is called Happy Lager: The installation steps are exactly the same as we described above for On the Rocks. The Git repository should be located here: git clone Happy Lager makes use of Craft's deeper layout capabilities. Here's a pair of screenshots from the About page: Below the fold: Here's the Services page: Craft's Entries Page Here's the Entries page in the dashboard with all the content from Happy Lager: Take a look at the entry types on the left navigation pane: Singles, Channels and Structures. Craft offers more sophisticated, extensible data types than WordPress. Singles are one off pages that have a unique design such as your site home page. Channels are for entries organized by date, such as a blog or news sections. Structures are for content provided in a predefined order. The Craft Editor Here's the edit page. 
Note the breadth of possible fields which again are customizable, e.g. Title, Featured Image, Short Description, Heading, Subheading, Article Body. Notice below the fold how the pull quote type offers various layouts in the story flow as does the image that follows (but I couldn't include it all in the screenshot): This capability is what Craft calls its Matrix, and it allows for more powerful story composition and layout. Each block can have custom fields and custom position, and can be dragged and dropped into place in the flow. Here's a short video showing the Matrix: Here's the Live Preview—just beautiful. It reminds me of Ghost's Markdown preview which I wrote about in Keeping Up with Ghost 0.5 (Tuts+). Readers may remember I hate Markdown—so I very much appreciate the Craft live preview: Here's a bit more on Live Preview: Craft offers easy access to past revisions: The Craft Media Library Image assets are organized by group and available on the Assets page: If you've ever waited for WordPress to load your media page, you'll appreciate how fast Craft is. 
Constructing the Happy Lager Home Page Here's a closer look at the content behind parts of the Happy Lager home page—specifically, this is how it appears when you're editing the content: Craft uses Twig templates to transpose the structured content elements into web pages: {# # About template # ------------------- # # Single about template # # #} {% extends "_layouts/site" %} {% block main %} <header class="alt"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex12"> <h1 class="alpha center reverse">{{ title }}</h1> </div> </div> </header> <section class="info-hero alt"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex8 g3-offset2"> {% if entry.infoHeroTopText %} <h1 class="delta reverse center subfont">{{ entry.infoHeroTopText }}</h1> {% endif %} {% if entry.infoHeroBottomText %} <p class="epsilon center reverse">{{ entry.infoHeroBottomText }}</p> {% endif %} </div> </div> </section> <section class="alt"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex10 g3-offset1"> {% if entry.firstSectionHeader %} <h1 class="beta center">{{ entry.firstSectionHeader }}</h1> {% endif %} {% if entry.firstSectionSubheader %} <p class="delta center subfont caps">{{ entry.firstSectionSubheader }}</p> {% endif %} </div> </div> Section Types Craft sites are built around the sections we outlined above: Singles, Channels and Structures. Here's a short video that shows section types in more depth: Here are the sections associated with the Happy Lager demonstration site—notice how each corresponds to the primary navigation bar. The Homepage and About page are singles. The News and Work page are Channels. The Services page is a Structure. Of course, Craft also offers categories and tags. Categories help you organize your site's content ahead of time, whereas tags let you create an ad hoc folksonomy based on the content in each of your articles. 
Here's a short video describing categories and tags: Another cool feature Craft offers is the ability to route URL requests by friendly paths directly to specific sections: A Closer Look at Happy Lager Pages Here's the What's On Tap page from: Here's a look at the Services—How It's Made page at. It's a structure whose elements have a pre-defined order. Each of the image boxes is driven by an entry under services. You can change their order of appearance via drag and drop: Here's an example Twig template for this page: {% extends "_layouts/site" %} {% block main %} <header class="alt"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex12"> <h1 class="alpha center reverse">{{ title }}</h1> </div> </div> </header> {% for entry in craft.entries.section('Services').find() %} <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex12"> <p class="delta alt"> <a href="{{ entry.url }}"> {{ entry.title }} </a> </p> </div> </div> {% endfor %} {% endblock %} And here's the template for each entry. This gives you an idea of how to build Twig templates for your Craft site and what's involved: {# # Services entry template # ------------------- # # This template gets loaded whenever a Work entry’s URL is # requested. That’s because the Work section’s Template setting is # set to “services/_entry”, the path to this template. 
#} {% extends "_layouts/site" %} {% block main %} {% set currentUrl = craft.request.getUrl() %} {% set lastSegment = craft.request.getLastSegment() %} {% if lastSegment != 'services' %} <nav class="subnav"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex12"> <ul> {% for entry in craft.entries.section('Services').type('servicesDetail').find() %} <li> <a href="{{ entry.url }}" class="{% if entry.url == currentUrl %}current{% endif %} subfont caps"> {{ entry.title }} </a> </li> {% endfor %} </ul> </div> </div> </nav> {% endif %} {% if lastSegment == 'services' %} <header class="alt2"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex12"> <h1 class="alpha center reverse">{{ entry.title }}</h1> </div> </div> </header> <section class="alt"> <div class="flex"> <div class="g1-flex4 g2-flex6 g3-flex8 g3-offset2"> {% if entry.indexHeading %} {{ entry.indexHeading }} {% endif %} </div> </div> </section> <section> <div class="flex"> {% for entry in craft.entries.section('Services').type('servicesDetail').order('postDate desc').find() %} <div class="g1-flex4 g2-flex2 g3-flex4"> <a href="{{ entry.url }}" class="services-entry-wrap"> {% set image = entry.featuredImage.first() %} {% if image %} <img src="{{ image.getUrl('thumb') }}" alt="image.title"/> {% endif %} <h3 class="center">{{ entry.title }}</h3> {% if entry.shortDescription %} {{ entry.shortDescription }} {% endif %} </a> </div> {% endfor %} </div> {% else %} {% include "includes/articlebody" %} {% endif %} </section> {% if lastSegment != 'services' %} <section class="service-points"> {% for block in entry.servicesBody %} <div class="flex"> <div class="service-point"> <div class="g1-flex4 g2-flex3 g3-flex4"> <h4>{{ block.heading }}</h4> {{ block.text }} </div> <div class="g1-flex4 g2-flex3 g3-flex8"> {% set photo = block.image.first() %} {% if photo %} <img class="" src="{{ photo.url }}" alt="{{ photo.title }}"> {% endif %} </div> </div> </div> {% endfor %} </section> <section class="services-work-entry"> 
{% set entries = craft.entries.section('Work').limit(1).offset(2) %} {% for entry in entries %} {% set asset = entry.featuredImage.last() %}{% if asset %} <div style="background-image: url('{{ asset.url }}')"> {% endif %} <div class="flex"> <div class="g1-flex4 g2-flex3 g3-flex6"> <a href="{{ entry.url }}"> {% set asset = entry.featuredImage.first() %}{% if asset %} <img src="{{ asset.url }}" alt="{{ asset.title }}" width="616" height="204"> {% endif %} </a> </div> <div class="g1-flex4 g2-flex3 g3-flex6"> <div class="summary-wrap"> <h2 class="callout-border"><a href="{{ entry.url }}">{{ entry.title }}</a></h2> <h3><a href="{{ entry.url }}">{{ entry.heading }}</a></h3> {% if entry.subheading %} <p>{{ entry.subheading }}</p> {% endif %} <p><a href="{{ entry.url }}" class="view-more hero-cta">View More</a></p> </div> </div> </div> </div> {% endfor %} </section> {% endif %} {% endblock %} {% block foot %} {{ parent() }} <script type="text/javascript" src="/assets/lib/shapes-polyfill.min.js"></script> {% endblock %} Editing Entries Editing entries in Craft is simple, much like WordPress but with enhanced layout capabilities that you would normally require plugins to accomplish. Each section can have multiple user-defined entry types. For example, the News section here has two different types of entries: articles and links. Entry types allow you to store different types of content in the same section. Here's a short video on Entry types: Now that you have a sense of how site construction works in Craft, let's install a fresh version from scratch. Installing Craft From Scratch To install Craft, visit the website and download the codebase. I'm using Mac OS X with MAMP for my local development and testing. Craft provides detailed installation instructions and links to guides for Mac, Laravel, Heroku and even an automated installation with Composer. 
Rename the htaccess file: cd ~/Sites/craftcms/public mv htaccess .htaccess Create a symbolic link to your Craft public directory for MAMP: ln -s ~/Sites/craftcms/public /Applications/MAMP/htdocs/craft If you run your Craft installation locally from the host names “craft.dev” or “ontherocks.dev”, you will have the option to switch between Craft Personal, Craft Client, and Craft Pro for free, forever. I created the database via PHPMyAdmin. Edit the Craft database configuration file for your MySQL credentials: nano ./craft/config/db.php Enter your credentials in the fields below: /** * Database Configuration * * All of your system's database configuration settings go in here. * You can see a list of the default settings in craft/app/etc/config/defaults/db.php */ return array( // The database server name or IP address. Usually this is 'localhost' or '127.0.0.1'. 'server' => 'localhost', // The database username to connect with. 'user' => 'root', // The database password to connect with. 'password' => 'your-password', // The name of the database to select. 'database' => 'craft', // The prefix to use when naming tables. This can be no more than 5 characters. 'tablePrefix' => 'craft', ); Set up write permissions for these Craft app directories: chmod 744 ./craft/app chmod 744 ./craft/config chmod 744 ./craft/storage/ Visit the local Craft home page and you should see the monkeys again! Register your initial administration account: Set up your site properties: And that's it: Here's your dashboard again: Craft is so end-user focused that they include a support contact form on their dashboard home page. You can find site settings from the navigation bar at the upper right. It sort of reminds me of iOS: Here's what your default Craft site looks like when you begin: Yes, Craft doesn't have the WordPress community of themes. For the most part, you have to code your own theme. For the novice, WordPress still has an edge. 
On the other hand, you may already notice how fast Craft runs compared to WordPress. Where to Go From Here? Certainly, you can begin building your site's sample content, but you're going to need to learn about Craft themes and plugins. Here are a few resources that will help you: - Craft's Template Overview - Craft's Introduction to Plugins - Straight Up Craft's (third party site) Plugin Directory - Straight Up Craft's Directory of Consultants - Follow @CraftCMS on Twitter. I also want to give a shout out to Brandon Kelly, owner of Pixel & Tonic, the creators of Craft. Brandon was super helpful in answering my questions and gave me a preview of their new demonstration site which you should be able to access now. Related Links Envato Tuts+ tutorials are translated into other languages by our community members—you can be involved too!Translate this post
https://code.tutsplus.com/tutorials/getting-started-with-craft-cms--cms-23249
CC-MAIN-2017-09
refinedweb
2,346
57.87
Results 1 to 3 of 3 - Join Date - Oct 2011 - 106 - Thanks - 12 - Thanked 0 Times in 0 Posts Un-write Intro: I am a procrastinator. I start to do work and end up on facebook or otherwise. There exists an app for OS X, Self-Control. It uses the hosts file to redirect requests to localhost. However, such an app is not near the surface of google (and if it is, don't tell me. I'm nearly done writing it) for windows. Meat of the Post: I have a File IO class that writes and "unwrites" to the host file: Code: def write_to_hosts(ip_string, hosts): f = open(hosts, "a") f.write(ip_string) f.close() def un_write(ip_string, hosts): print "IP list: \n", ip_string f = open(hosts, "r") h_text = f.read() f.close() print "hosts file:\n"+h_text h_text.replace(ip_string, "") print "hosts file:\n"+h_text f = open(hosts, "w") f.write(h_text) f.close() un_write(ip_string, hosts). It doesn't un-write. What am I doing wrong here? - 05:46 PM.
http://www.codingforums.com/python/280316-un-write.html?s=5a00071e8b7b5693fc3718d6d0b8878c
CC-MAIN-2015-48
refinedweb
175
85.28
mbsinit man page mbsinit — test for initial shift state Synopsis #include <wchar.h> int mbsinit(const mbstate_t *ps); Description Character Value mbsinit() returns nonzero if *ps is an initial state, or if ps is NULL. Otherwise, it returns 0. Attributes For an explanation of the terms used in this section, see attributes(7). Conforming to POSIX.1-2001, POSIX.1-2008, C99. Notes The behavior of mbsinit() depends on the LC_CTYPE category of the current locale. See Also mbrlen(3), mbrtowc(3), mbsrtowcs(3), wcrtomb(3), wcsrtombs(3) Colophon This page is part of release 4.10 of the Linux man-pages project. A description of the project, information about reporting bugs, and the latest version of this page, can be found at. Referenced By mbrtowc(3), mbsnrtowcs(3), mbsrtowcs(3), wcrtomb(3), wcsnrtombs(3), wcsrtombs(3).
https://www.mankier.com/3/mbsinit
CC-MAIN-2017-13
refinedweb
137
51.14
"Karthik Gurumurthy" <karthikg at aztec.soft.net> wrote in message news:mailman.1010341632.10280.python-list at python.org... > hi all, > my code is behaving in a strange way. > > I have 4 modules > > test.py -- > from movie import Movie, Rental > movie.py --> has 2 classes Movie , Rental > #needs price and hence does > import price > price.py --> needs Movie > #does > from movie import Movie, Rental > cust.py --> has one class Customer > > so this is the sequence. > > test-->loads movie --> load price which in turn needs movie. > > Can someone tell me what's wrong here?? > > >>> import test > Traceback (most recent call last): > File "<stdin>", line 1, in ? > File "test.py", line 1, in ? > from movie import Movie, Rental > File "movie.py", line 1, in ? movie starts up and before anything is defined in the namespace, it does > import price > File "price.py", line 1, in ? which then tries to get a reference from movie to Movie and Rental > from movie import Movie, Rental > ImportError: cannot import name Movie But movie itself is still staring up and has not yet defined Movie You'll need to restructure in light of this dependency problem. One way might be to relocate price in movie. HTH, -- Emile van Sebille emile at fenx.com ---------
https://mail.python.org/pipermail/python-list/2002-January/124319.html
CC-MAIN-2019-30
refinedweb
207
87.31
A Django template library for manipulating URLs. Project description .=., ;c =\ __| _/ .'-'-._/-'-._ /.. ____ \ / {% spurl %} \ ( / \--\_>/-/'._ ) \-;_/\__;__/ _/ _/ '._}|==o==\{_\/ / /-._.--\ \_ // / /| \ \ \ / | | | \; | \ \ / / | :/ \: \ \_\ / | /.'| /: | \ \ | | |--| . |--| \_\ / _/ \ | : | /___--._) \ |_(---'-| >-'-| | '-' /_/ \_\ Spurl is a Django template library for manipulating URLs. It’s built on top of Zachary Voase’s excellent urlobject. Authored by Jamie Matthews, and some great contributors. Installation Either checkout spurl from GitHub, or install using pip: pip install django-spurl Add spurl to your INSTALLED_APPS: INSTALLED_APPS = ( ... 'spurl', ) Finally, whenever you want to use Spurl in a template, you need to load its template library: {% load spurl %} Usage Spurl is not a replacement for Django’s built-in {% url %} template tag. It is a general-purpose toolkit for manipulating URL components in templates. You can use it alongside {% url %} if you like (see below). Spurl provides a single template tag, called (surprisingly enough), spurl. You call it with a set of key=value keyword arguments, which are described fully below. To show some of the features of Spurl, we’ll go over a couple of simple example use cases. Adding query parameters to URLs Say you have a list of external URLs in your database. When you create links to these URLs in a template, you need to add a referrer=mysite.com query parameter to each. The simple way to do this might be: {% for url, title in list_of_links %} <a href="{{ url }}?referrer=mysite.com">{{ title }}</a> {% endfor %} The problem here is that you don’t know in advance if the URLs stored in your database already have query parameters. If they do, you’ll generate malformed links like. Spurl can fix this. Because it knows about the components of a URL, it can add parameters onto an existing query, if there is one. 
{% for url, title in list_of_links %} <a href="{% spurl base=url{{ title }}</a> {% endfor %} Note that when you pass a literal string to Spurl, you have to wrap it in double quotes. If you don’t, Spurl will assume it’s a variable name and try to look it up in the template’s context. SSL-sensitive external URLs. Suppose your site needs to display a gallery of images, the URLs of which have come from some third-party web API. Additionally, imagine your site needs to run both in secure and non-secure mode - the same content is available at both https or http URLs (depending on whether a visitor is logged in, say). Some browsers will complain loudly (displaying “Mixed content warnings” to the user) if the page being displayed is https but some of the assets are http. Spurl can fix this. {% for image_url in list_of_image_urls %} <img src="{% spurl base=image_url secure=request.is_secure %}" /> {% endfor %} This will take the image URL you supply and replace the scheme component (the http or https bit) with the correct version, depending on the return value of request.is_secure(). Note that the above assumes you’re using a RequestContext so that request is available in your template. Using alongside {% url %} Notice that Spurl’s functionality doesn’t overlap with Django’s built-in {% url %} tag. Spurl doesn’t know about your urlconf, and doesn’t do any URL reversing. In fact, Spurl is mostly useful for manipulating external URLs, rather than URLs on your own site. However, you can easily use Spurl with {% url %} if you need to. You just have to use the as keyword to put your reversed URL in a template variable, and then pass this to Spurl. As it’s a relative path (rather than a full URL) you should pass it using the path argument. For example, say you want to append some query parameters to a URL on your site: {% url your_url_name as my_url %} <a href="{% spurl path=my_urlClick here!</a> There is another way to use Spurl with {% url %}, see Embedding template tags below. 
Available arguments Below is a full list of arguments that Spurl understands. base If you pass a base argument to Spurl, it will parse its contents and use this as the base URL upon which all other arguments will operate. If you don’t pass a base argument, Spurl will generate a URL from scratch based on the components that you pass in separately. scheme Set the scheme component of the URL. Example: {% spurl base="" scheme="ftp" %} This will return See also: scheme_from, below. host Set the host component of the URL. Example: {% spurl base="" host="google.com" %} This will return See also: host_from, below. auth Handle HTTP Basic authentication, username and password can be passed in URL. Example: {% spurl base="" auth="user:pass" %} This will return path Set the path component of the URL. Example: {% spurl base="" path="/different/" %} This will return See also: path_from, below. add_path Append a path component to the existing path. You can add multiple add_path calls, and the results of each will be combined. Example: {% spurl base=STATIC_URL add_path="javascript" add_path="lib" add_path="jquery.js" %} This will return (assuming STATIC_URL is set to) See also: add_path_from, below. fragment Set the fragment component of the URL. Example: {% spurl base="" fragment="myfragment" %} This will return See also: fragment_from, below. port Set the port component of the URL. Example: {% spurl base="" port="8080" %} This will return See also: port_from, below. query Set the query component of the URL. Example: {% spurl base="" query="foo=bar&bar=baz" %} This will return The query argument can also be passed a dictionary from your template’s context. # views.py def my_view(request): my_query_params = {'foo': 'bar', 'bar': 'baz'} return render(request, 'path/to/template.html', {'my_query_params': my_query_params}) <!-- template.html --> {% spurl base="" query=my_query_params %} This will return Finally, you can pass individual template variables to the query. 
To do this, Spurl uses Django’s template system. For example: {% spurl base="" query="foo={{ variable_name }}" %} See also: query_from, below. add_query Append a set of parameters to an existing query. If your base URL might already have a query component, this will merge the existing parameters with your new ones. Example: {% spurl base="" add_query="bar=baz" %} This will return You can add multiple add_query calls, and the results of each will be combined: {% spurl base="" add_query="foo=bar" add_query="bar=baz" %} This will return Like the query argument above, the values passed to add_query can also be dictionaries, and they can contain Django template variables. See also: add_query_from, below. set_query Appends a set of parameters to an existing query, overwriting existing parameters with the same name. Otherwise uses the exact same syntax as add_query. See also: set_query_from, below. toggle_query Toggle the value of one or more query parameters between two possible values. Useful when reordering list views. Example: {% spurl base=request.get_full_path toggle_query="sort=ascending,descending" %} If the value of request.get_full_path() doesn’t have a sort parameter, one will be added with a value of ascending (the first item in the list is the default). If it already has a sort parameter, and it is currently set to ascending, it will be set to descending. If it’s already set to descending, it will be set to ascending. You can also specify the options as a dictionary, mapping the parameter name to a two-tuple containing the values to toggle. 
Example: # views.py SORT_PARAM = 'sort' ASCENDING = 'ascending' DESCENDING = 'descending' def my_view(request): if request.GET.get(SORT_PARAM, ASCENDING) == DESCENDING: object_list = MyModel.objects.order_by('-somefield') else: object_list = MyModel.objects.order_by('somefield') return render(request, 'path/to/template.html', { 'object_list': object_list, 'sort_params': {SORT_PARAM: (ASCENDING, DESCENDING)}, }) <!-- template.html --> <a href="{% spurl base=request.get_full_path toggle_query=sort_params %}">Reverse order</a> remove_query_param Remove a query parameter from an existing query: {% spurl base="" remove_query_param="foo" %} This will return Again, you can add multiple remove_query_param calls, and the results will be combined: {% spurl base="" remove_query_param="foo" remove_query_param="bar" %} This will return You can also remove parameters with specific values: {% spurl base="" remove_query_param="foo" remove_query_param="foo=baz" %} This will return Finally, you can pass individual template variables to the remove_query_param calls. To do this, Spurl uses Django’s template system. For example: {% spurl base="" remove_query_param="{{ variable_name }}" %} secure Control whether the generated URL starts with http or https. The value of this argument can be a boolean (True or False), if you’re using a context variable. If you’re using a literal argument here, it must be a quoted string. The strings "True" or "on" (case-insensitive) will be converted to True, any other string will be converted to False. Example: {% spurl base="" secure="True" %} This will return autoescape By default, Spurl will escape its output in the same way as Django’s template system. For example, an & character in a URL will be rendered as &. You can override this behaviour by passing an autoescape argument, which must be either a boolean (if passed from a template variable) or a string. 
The strings "True" or "on" (case-insensitive) will be converted to True, any other string will be converted to False. Added bonus: _from parameters As well as those listed above, Spurl provides a family of parameters for combining URLs. Given a base URL to start with, you can copy a component from another URL. These arguments expect to be passed a full URL (or anything that can be understood by URLObject.parse). This URL will be parsed, and then the component in question will be extracted and combined with the base URL. Below is a full list of the available _from methods. They have identical semantics to their counterparts above (except they expect a full URL, not just a URL component). - query_from - add_query_from - set_query_from - scheme_from - host_from - path_from - add_path_from - fragment_from - port_from Example: {% spurl base=" path_from="" %} This will return Building a URL without displaying it Like Django’s {% url %} tag, Spurl allows you to insert the generated URL into the template’s context for later use. Example: {% spurl base="" secure="True" as secure_url %} <p>The secure version of the url is {{ secure_url }}</p> Development To contribute, fork the repository, make your changes, add some tests, commit, push, and open a pull request. Artwork credit Superman ASCII art comes from Changes 0.6.8 (2021-11-15) - Fix toggle_query support when one word is a fragment of the other. 0.6.7 (2020-05-22) - Fixed MANIFEST.in 0.6.6 (2019-03-29) - Added support for an except clause to remove all but specifed query vars. 0.6.5 (2018-05-09) - Added support for Django 2.x and dropped support for older and non-LTS version of Django. 0.6.4 (2015-12-26) - Getting ready for Django 1.10 release. - Dropped support for Django 1.3 and older. 0.6.3 (2015-12-17) - Django 1.9 compatible (Albert Koch) 0.6.2 (2015-09-17) - Add support for template variables to remove_query_param. - Handle auth parameters to be able to add username:password to URLs. 
0.6.1 (2015-07-14) - Python 3 compatible! 0.6.0 (2012-02-23) - Upgrade URLObject dependency to 2.0 0.5.0 (2011-12-14) - Fix typos in changelog. - Add family of arguments (_from) for combining URLs. - Add toggle_query argument. 0.4.0 (2011-12-07) - Upgrade URLObject dependency to 0.6.0 - Add remove_query_param argument. - Add support for template tags embedded within argument values. - Extensive refactoring. 0.3.0 (2011-08-18) - Add set_query argument. 0.2.0 (2011-08-08) - Add as argument to insert generated URL into template context. 0.1.0 (2011-07-29) - Initial release. Project details Release history Release notifications | RSS feed Download files Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
https://pypi.org/project/django-spurl/
CC-MAIN-2021-49
refinedweb
1,935
58.08
Emil Hejlesen2,223 Points stringcases.py i dont know why this doesnt work def stringcases(string): return ((string.upper(),), (string.lower(),), (string.capitalize(),), (string[::-1]),) 2 Answers Dave HarkerPro Student 15,049 Points Hi Emil Hejlesen, Really close. The challenge requires that you title case the string for the third element of the returned tuple, you've capitalized it - You'll need to use .title() not .capitalize() Also, I'm not sure why you've added the additional ,), around each element ... you're effectively returning a tuple containing single element tuples (kinda!) ... instead just try: return string.upper(), string.lower(), string.capitalize(), string[::-1] If you really want to separate the elements, you could use: return (string.upper()), (string.lower()), (string.capitalize()), (string[::-1]) # or hardcore/my eyes are burning mode return ((string.upper()), (string.lower()), (string.capitalize()), (string[::-1])) # but it isn't necessary and I think makes it harder to read Great effort though keep up the good work and happy coding, Dave Dave HarkerPro Student 15,049 Points Anytime, glad you found it useful Best of luck moving forward, and happy coding Dave adam n46,490 Points (('HI THERE',), ('hi there',), ('Hi there',), 'ereht ih') ^ that's some sample output i get with your function. Is that what the challenge wants from you? Remember that an easy way to make something into a tuple is putting a comma after each item: bob = 1, 2, 3, 4 # becomes (1, 2, 3, 4) Emil Hejlesen2,223 Points Emil Hejlesen2,223 Points hi dave it was the .capitalize() that was wrong, i changed it to .title() and the thing with the () was just to try and structure it for me but i didn't help, thank you for the help :)
https://teamtreehouse.com/community/stringcasespy
CC-MAIN-2018-51
refinedweb
290
61.26
The trim() method is used to get a string whose value is this string, with any leading and trailing whitespace removed. If this String object represents an empty character sequence or the first and last characters of character sequence represented by this String object both have codes greater than '\u0020' (the space character), then a reference to this String object is returned. Otherwise, if there is no character with a code greater than '\u0020' in the string, then a String object representing an empty string is returned. Otherwise, let k be the index of the first character in the string whose code is greater than '\u0020', and let m be the index of the last character in the string whose code is greater than '\u0020'. A String object is returned, representing the substring of this string that begins with the character at index k and ends with the character at index m-that is, the result of this.substring(k, m + 1). This method may be used to trim whitespace (as defined above) from the beginning and end of a string. Java Platform: Java SE 8 Syntax : trim() Return Value : A string whose value is this string, with any leading and trailing white space removed, or this string if it has no leading or trailing white space. Return Value Type: String Example : Java String trim() Method The following example shows the usage of java String() method. public class StringTrimExample { public static void main(String[] args) { // String declaration with trailing and leading space String inputValue = " This is an example Java trim() Method "; System.out.println(); // removing the white spaces String newValue = inputValue.trim(); System.out.println(newValue); System.out.println(); } } Output: This is an example Java trim() Method Java Code Editor: Join our Question Answer community to learn and share your programming knowledge.
http://www.w3resource.com/java-tutorial/string/string_trim.php
CC-MAIN-2017-34
refinedweb
302
55.37
Cement2, the next version of the Python application framework is almost ready for a beta release. I love Command Line Interfaces, and Cement2 is a slick way to create a CLI in Python. The canonical Hello World CLI is just five lines: from cement2.core import foundation app = foundation.lay_cement('helloworld') app.setup() app.run() print('Hello World') With the single call to lay_cement, this Python script is a full-fledged CLI complete with usage and help information: $ python helloworld.py --help usage: helloworld.py [-h] [--debug] [--quiet] optional arguments: -h, --help show this help message and exit --debug toggle debug output --quiet suppress all output I especially like how Cement separates interfaces and handlers, allowing for swappable implementations of features based on command line arguments. There is also a robust plugin architecture for those that want to extend it. Be sure and check out the Portland branch for the source as Cement2 is still under active development. Have comments? Send a tweet to @TheChangelog on Twitter. Subscribe to The Changelog Weekly – our weekly email covering everything that hits our open source radar.
http://thechangelog.com/cement2-python-cli-framework-nears-beta/
CC-MAIN-2014-42
refinedweb
183
55.44
MacFSEvents 0.2.4 Thread-based interface to file system observation primitives. Contents Overview MacFSEvents is a Python library that provides thread-safe directory observation primitives using callbacks. It wraps the Mac OS X FSEvents API in a C-extension. Requirements: - Mac OS X 10.5+ (Leopard) This software was written by Malthe Borch <mborch@gmail.com>. The pyfsevents module by Nicolas Dumazet was used for reference. Why? At this time of writing there are three other libraries that integrate with the FSEvents API: pyobjc-framework-FSEvents These use the PyObjC bridge infrastructure which most applications do not need. pyfsevents Not thread-safe (API is not designed to support it). fsevents Obsolete bindings to the socket API by John Sutherland. These issues have been addressed in MacFSEvents. The library provides a clean API and has full test coverage. Note that pyfsevents has bindings to the file descriptor observation primitives. This is not currently implemented by the present library. Usage To observe a directory structure (recursively) under path, we set up an observer thread and schedule an event stream: from fsevents import Observer observer = Observer() observer.start() def callback(subpath, mask): ... from fsevents import Stream stream = Stream(callback, path) observer.schedule(stream) Streams can observe any number of paths; simply pass them as positional arguments (or using the * operator): stream = Stream(callback, *paths) To start the observer in its own thread, use the start method: observer.starts() To start the observer in the current thread, use the run method (it will block the thread until stopped from another thread): observer.run() The callback function will be called when an event occurs. The subpath parameter contains the path at which the event happened (may be a subdirectory) while mask parameter is the event mask [1]. 
To stop observation, simply unschedule the stream and stop the observer: observer.unschedule(stream) observer.stop() While the observer thread will automatically join your main thread at this point, it doesn't hurt to be explicit about this: observer.join() We often want to know about events on a file level; to receive file events instead of path events, pass in file_events=True to the stream constructor: def callback(event): ... stream = Stream(callback, path, file_events=True) The event object mimick the file events of the inotify kernel extension available in newer linux kernels. It has the following attributes: - mask - The mask field is a bitmask representing the event that occurred. - cookie - The cookie field is a unique identifier linking together two related but separate events. It is used to link together an IN_MOVED_FROM and an IN_MOVED_TO event. - name - The name field contains the name of the object to which the event occurred. This is the absolute filename. Note that the logic to implement file events is implemented in Python; a snapshot of the observed file system hierarchies is maintained and used to monitor file events. Changelog 0.2.4 (2010-12-06) - Prevent crashes on recursive folder delete and multiple folder add. [totolici]. 0.2.3 (2010-07-27) - Fixed broken release. 0.2.2 (2010-07-26) - Python 2.4 compatibility [howitz] - Fixed an issue where the addition of a new directory would crash the program when using file event monitoring [andymacp]. 0.2.1 (2010-04-27) - Fixed an import issue [andymacp]. 0.2 (2010-04-26) - Fixed issue where existing directories would be reported along with a newly created one [marcboeker]. - Added support for file event monitoring. - Fixed reference counting bug which could result in a segmentation fault. 0.1 (2009-11-27) - Initial public release. 
- Downloads (All Versions): - 105 downloads in the last day - 495 downloads in the last week - 1686 downloads in the last month - Author: Malthe Borch - License: BSD - Platform: Mac OS X - Categories - Package Index Owner: malthe - DOAP record: MacFSEvents-0.2.4.xml
https://pypi.python.org/pypi/MacFSEvents/0.2.4
CC-MAIN-2014-15
refinedweb
634
56.76
> import module namespace xqxq = ''; > > variable $queryID := xqxq:prepare-main-module('declare variable $ext2 > external; $ext2'); > > fn:local-name-from-QName(xqxq:variable-type-name($queryID, xs:QName('ext2'))) > > returns xs:anyType. Shouldn't it be anyType? I think the QName is not > constructed > correctly. What do you mean is not being constructed correctly? > > xs:anyType should not be a string constant in the code. I think this should > come from the typemanager. The thing with the variables that doesn't have a type specified in code when asked about the XQType of the variable using the function var->getType it returns a NULL pointer that is why a set a constant value of xs:anyType, now you don't want this to be constant what should I do then? create a new any type XQType variable using the TypeManager and return it's schemaString()? -- Your team Zorba Coders is subscribed to branch lp:zorba. -- Mailing list: Post to : zorba-coders@lists.launchpad.net Unsubscribe : More help :
https://www.mail-archive.com/zorba-coders@lists.launchpad.net/msg21000.html
CC-MAIN-2018-47
refinedweb
166
58.79
Contents Abstract The iterator protocol in Python 2.x consists of two methods: __iter__() called on an iterable object to yield an iterator, and next() called on an iterator object to yield the next item in the sequence. Using a for loop to iterate over an iterable object implicitly calls both of these methods. This PEP proposes that the next method be renamed to __next__, consistent with all the other protocols in Python in which a method is implicitly called as part of a language-level protocol, and that a built-in function named next be introduced to invoke __next__ method, consistent with the manner in which other protocols are explicitly invoked. Names With Double Underscores In Python, double underscores before and after a name are used to distinguish names that belong to the language itself. Attributes and methods that are implicitly used or created by the interpreter employ this naming convention; some examples are: - __file__ - an attribute automatically created by the interpreter - __dict__ - an attribute with special meaning to the interpreter - __init__ - a method implicitly called by the interpreter Note that this convention applies to methods such as __init__ that are explicitly defined by the programmer, as well as attributes such as __file__ that can only be accessed by naming them explicitly, so it includes names that are used or created by the interpreter. (Not all things that are called "protocols" are made of methods with double-underscore names. For example, the __contains__ method has double underscores because the language construct x in y implicitly calls __contains__. But even though the read method is part of the file protocol, it does not have double underscores because there is no language construct that implicitly invokes x.read().) 
The use of double underscores creates a separate namespace for names that are part of the Python language definition, so that programmers are free to create variables, attributes, and methods that start with letters, without fear of silently colliding with names that have a language-defined purpose. (Colliding with reserved keywords is still a concern, but at least this will immediately yield a syntax error.) The naming of the next method on iterators is an exception to this convention. Code that nowhere contains an explicit call to a next method can nonetheless be silently affected by the presence of such a method. Therefore, this PEP proposes that iterators should have a __next__ method instead of a next method (with no change in semantics). Double-Underscore Methods and Built-In Functions The Python language defines several protocols that are implemented or customized by defining methods with double-underscore names. In each case, the protocol is provided by an internal method implemented as a C function in the interpreter. For objects defined in Python, this C function supports customization by implicitly invoking a Python method with a double-underscore name (it often does a little bit of additional work beyond just calling the Python method.) Sometimes the protocol is invoked by a syntactic construct: - x[y] --> internal tp_getitem --> x.__getitem__(y) - x + y --> internal nb_add --> x.__add__(y) - -x --> internal nb_negative --> x.__neg__() Sometimes there is no syntactic construct, but it is still useful to be able to explicitly invoke the protocol. For such cases Python offers a built-in function of the same name but without the double underscores. - len(x) --> internal sq_length --> x.__len__() - hash(x) --> internal tp_hash --> x.__hash__() - iter(x) --> internal tp_iter --> x.__iter__() Following this pattern, the natural way to handle next is to add a next built-in function that behaves in exactly the same fashion. 
- next(x) --> internal tp_iternext --> x.__next__() Further, it is proposed that the next built-in function accept a sentinel value as an optional second argument, following the style of the getattr and iter built-in functions. When called with two arguments, next catches the StopIteration exception and returns the sentinel value instead of propagating the exception. This creates a nice duality between iter and next: iter(function, sentinel) <--> next(iterator, sentinel) Previous Proposals This proposal is not a new idea. The idea proposed here was supported by the BDFL on python-dev [1] and is even mentioned in the original iterator PEP, PEP 234: (In retrospect, it might have been better to go for __next__() and have a new built-in, next(it), which calls it.__next__(). But alas, it's too late; this has been deployed in Python 2.2 since December 2001.) Objections There have been a few objections to the addition of more built-ins. In particular, Martin von Loewis writes [2]: I dislike the introduction of more builtins unless they have a true generality (i.e. are likely to be needed in many programs). For this one, I think the normal usage of __next__ will be with a for loop, so I don't think one would often need an explicit next() invocation. It is also not true that most protocols are explicitly invoked through builtin functions. Instead, most protocols are can be explicitly invoked through methods in the operator module. So following tradition, it should be operator.next. ... As an alternative, I propose that object grows a .next() method, which calls __next__ by default. Transition Plan Two additional transformations will be added to the 2to3 translation tool [3]: - Method definitions named next will be renamed to __next__. - Explicit calls to the next method will be replaced with calls to the built-in next function. For example, x.next() will become next(x). 
Collin Winter looked into the possibility of automatically deciding whether to perform the second transformation depending on the presence of a module-level binding to next [4] and found that it would be "ugly and slow". Instead, the translation tool will emit warnings upon detecting such a binding. Collin has proposed warnings for the following conditions [5]: - Module-level assignments to next. - Module-level definitions of a function named next. - Module-level imports of the name next. - Assignments to __builtin__.next. Implementation A patch with the necessary changes (except the 2to3 tool) was written by Georg Brandl and committed as revision 54910.
http://www.python.org/dev/peps/pep-3114/
CC-MAIN-2013-20
refinedweb
1,016
52.9
bhavin_shah1 15-10-2015 Hi I'm trying to recreate the time parting plugin in DTM using data elements The idea is to use the JS DOM date() function to populate a ever/prop and then to use the rule classification to split the string into the components ( saving me 3 variables) i'm not a java script expert so but I've managed to get window.Date() to return back the value I want, When I load window.Date as a jsobject in Data Elements it returns function Date() { [native code] } Any help on this or Is there a better way to achieve this? Thanks Bhavin michael_johns_6 You can use something like this as a custom script for your data element. return (function () { "use strict"; var now = new Date(); var year = "" + now.getFullYear(); var month = "" + (now.getMonth() + 1); if (month.length == 1) { month = "0" + month; } var day = "" + now.getDate(); if (day.length == 1) { day = "0" + day; } var hour = "" + now.getHours(); if (hour.length == 1) { hour = "0" + hour; } return year + "-" + month + "-" + day + " " + hour; })(); garry_przyklenk var dt = new Date(); var day, hour, minutes, daytype; if (dt.getDay() == "0") { day = "Sunday"; daytype = "Weekend"; } else if { (dt.getDay() == "1") { day = "Monday"; daytype = "Weekday"; } else if { (dt.getDay() == "2") { day = "Tuesday"; daytype = "Weekday"; } else if { (dt.getDay() == "3") { day = "Wednesday"; daytype = "Weekday"; } else if { (dt.getDay() == "4") { day = "Thursday"; daytype = "Weekday"; } else if { (dt.getDay() == "5") { day = "Friday"; daytype = "Weekday"; } else if { (dt.getDay() == "6") { day = "Saturday"; daytype = "Weekend"; } var time = daytype + ":" + day + ":" + dt.getHours() + ":" + dt.getMinutes(); return(time); Once you have the output of this data element (call it whatever, I called mine "Day and Time") you can setup a classification rule builder entry to break it up into Weekday/Weekend, Day of the Week, and Hour of the Day. 
I didn't bother breaking up hour of the day by every 30 minute interval, but whatever, you could if you like. I also didn't put in AM/PM, because I hated that for the longest time, makes it really hard to sort nicely.
https://experienceleaguecommunities.adobe.com/t5/dynamic-tag-management/get-time-parting-plugin-replication-in-dtm/td-p/199502
CC-MAIN-2021-04
refinedweb
342
73.98
I've been working for a while on a new project (stay tuned!) which has a watch app. Up until now I used a script that automatically increases the build number of the app, based on the value in Info.plist: buildNumber=$(/usr/libexec/PlistBuddy -c "Print CFBundleVersion" "$INFOPLIST_FILE") buildNumber=$(($buildNumber + 1)) /usr/libexec/PlistBuddy -c "Set :CFBundleVersion $buildNumber" "$INFOPLIST_FILE" This works really well, but the problem with watch targets is that the build number has to be the same for all targets. First idea was to just add this build script to all targets, which might work well, but it just felt wrong. Xcode offers a build setting called CURRENT_PROJECT_VERSION, which exists at the project level. If you go in each target and delete this setting (if it exists), it means each target will inherit the value found at the project level. Step 1 accomplished: a single source of truth. Step 2: automating it. I replaced the PlistBuddy script above with the much simpler one below, added as a build phase on the main app's target: NEW_VERSION=$(($CURRENT_PROJECT_VERSION + 1)) ❶ xcrun agvtool new-version $NEW_VERSION ❷ What this does is it first reads the current version and adds 1 to it (❶), it then passes it to xcrun's agvtool to be set as the new version (❷). I think it's much simpler and more elegant this way, even for a single target.]]> The other day we released DND Me, a simple Mac app that lives in your menu bar with which you can easily enable DND for a certain amount of time. 50% off during launch!]]> In a previous post I was talking about an easier way to create UIFonts: extension UIFont { static func regular(_ size: CGFloat) -> UIFont { return .systemFont(ofSize: size, weight: .regular) // Or any other font. } static func medium(_ size: CGFloat) -> UIFont { return .systemFont(ofSize: size, weight: .medium) } } While this indeed improves the usage, it doesn’t address the repeatability of our code. 
We tend to use one font in several places: let titleLabel = UILabel(frame: .zero) titleLabel.font = .medium(16) // [...] Another part of the app let otherTitleLabel = UILabel(frame: .zero) otherTitleLabel.font = .medium(16) This means that if we’ll ever want to change the size of the title, we’d have to find all the places and do the replacement. Usually, a weight and a size are associated with an element type, like a title, but not always — this will cause even more headaches. Following DRY practices, we could improve this, by creating a .title font: extension UIFont { @nonobjc static let title = UIFont.medium(16) } Now, the call site will look better from a semantic point of view: let titleLabel = UILabel(frame: .zero) titleLabel.font = .title // [...] Another part of the app let otherTitleLabel = UILabel(frame: .zero) otherTitleLabel.font = .title And, in the future, if we’ll want to change the size of the title, we’ll just have to change the static property 👍🏻. For. Just like it’s beneficial for the brain to learn several spoken languages, the same can be said about programming languages. The benefit might not seem big, but it adds up. Each language has its own set of rules, of best practices, of approaches and paradigms. Learning more than one language will increase your ability to see a problem from different angles, to widen your perspective, to bring and apply principles from one language to another. Adrian, in his post about being a developer over 40, gives advice as to learn a new programming every year and his approach is to build a simple calculator with whatever he learns — getting familiar with the IDE and APIs. If you’re adventurous, you can go one step further and do some exercises, some katas or even a bit more complex apps. The point is to get in contact with it, go slightly beyond the very basics, to expand your knowledge. 
In 10 years you might say ”7 of these languages are useless to me, I have never used them”, but the knowledge adds up as does the perspective widening. At the very least, your self-confidence that you can do it and that you did builds up. Lastly, if you wouldn’t have set out to learn new languages, you wouldn’t have learned the 3 that you found useful, either. Keep learning; it’s a life-long process, with tremendous benefits.]]> We!]]> If you’d like to learn a new programming language, try to aim for ”write a line of code” every day. You might rightfully ask ”how will one line help in the long run?” and the answer is rather simple: you will almost never stop at one line of code; it also keeps you connected and the process ongoing. This is how I learned Ruby a few years ago, by aiming for ”a couple of lines of code”. From there, I ended up creating a ”real” blog with Sinatra (was using some WYSIWYG editor until then). This, in turn, led to me using my blog as a playground for every technology I wanted to learn ever since: Node.js, Swift and now React (still WIP). Don’t dismiss the power of small progress. You’ll eventually end up learning that new language/framework/tool, which, in turn, might lead to other improvements down the line.]]> Everyone loves animations and I think every app should make use of them; with care, but that in a future post. The easiest way to add animations is with UIView’s animate method, or with UIViewPropertyAnimators. Pretty straightforward and easy to use, but they don’t support animating CALayer properties, like borderColor or borderWidth. For these, we have CABasicAnimation, or rather all of its concrete subclasses: CABasicAnimation, CAKeyframeAnimation, CAAnimationGroup, or CATransition. In this post we’ll quickly cover CABasicAnimation and CAAnimationGroup. 
Say we want to animate the borderColor of a view, this is how we’d go about it: let borderColorAnimation = CABasicAnimation(keyPath: "borderColor") // 1 borderColorAnimation.duration = 0.15 // 2 borderColorAnimation.fromValue = UIColor.red.cgColor // 3 borderColorAnimation.toValue = UIColor.blue.cgColor // 4 borderColorAnimation.timingFunction = CAMediaTimingFunction(name: .linear) // 5 borderColorAnimation.beginTime = CACurrentMediaTime() + 0.2 // 6 view.layer.add(borderColorAnimation, forKey: "blueBorderColorAnimation") // 7 First, we initialize one with a keyPath, the property of the layer we want to animate (1). We then set some properties, like the duration (2), the starting (3) and end (4) colors, the timing function (5) and a start time (6). Lastly, we have to add this animation to our layer under a key, which is just a string that identifies the animation. The fromValue and toValue are of type Any, since they have to be able to accept pretty much anything, from CGColors (our case), to CGRects if we want to animate the bounds, or Floats, if we want to animate the borderWidth (like below). The timing functions are the equivalent of the ones used on UIView.animate, UIViewAnimationOptions, UIViewAnimationOptionCurveLinear in our case. The CACurrentMediaTime() gives us the current time, to which we add 0.2 seconds, since in our case, we want it to start with a slight delay. Now, if we want to animate the borderWidth at the same time, we’d go about in a similar fashion: let borderWidthAnimation = CABasicAnimation(keyPath: "borderWidth") borderWidthAnimation.duration = 0.15 borderWidthAnimation.fromValue = 0.5 borderWidthAnimation.toValue = 1.5 borderWidthAnimation.timingFunction = CAMediaTimingFunction(name: .linear) borderWidthAnimation.beginTime = CACurrentMediaTime() + 0.2 view.layer.add(borderWidthAnimation, forKey: "tripleBorderWidthAnimation") The same properties are set, except the fromValue and toValue, which are now Floats. 
This already seems a bit wrong; we’re duplicating a lot of code, like the duration, timingFunction and the beginTime. Luckily for us, we can make use of CAAnimationGroups. These allow multiple animations to be grouped and run together. let borderColorAnimation = CABasicAnimation(keyPath: "borderColor") // 1 borderColorAnimation.fromValue = UIColor.red.cgColor borderColorAnimation.toValue = UIColor.blue.cgColor let borderWidthAnimation = CABasicAnimation(keyPath: "borderWidth") // 2 borderWidthAnimation.fromValue = 0.5 borderWidthAnimation.toValue = 1.5 let group = CAAnimationGroup() // 3 group.duration = 0.15 group.timingFunction = CAMediaTimingFunction(name: .linear) group.beginTime = CACurrentMediaTime() + 0.2 group.animations = [borderColorAnimation, borderWidthAnimation] // 4 view.layer.add(group, forKey: "borderChangeAnimationGroup") // 5 We first create our borderColor (1) and borderWidth (2) animations as before, but we only set the fromValue and toValue. We then create a group (3), set the remaining properties on it — duration, timingFunction, beginTime — and also, a new one, called animations; here we set an array of CAAnimations that we want to run concurrently in this group. Lastly, we add the group to the layer, just like we previously did with the individual CABasicAnimations — the group is just a subclass of CAAnimation, remember. This will run both animations at the same time, with the same time properties. It also allows us to write cleaner code that’s easier to reason with, but also less prone to errors, since everything is configured in one place. Imagine having 5 of these animations, then realizing that something is wrong — we’d have to change the values in all 5 places! This was a very, very short introduction to CAAnimation. Marin Todorov wrote a wonderful book about CoreAnimation which you can find here. I can’t recommend it enough, especially if you’re starting. 
Happy animating!]]> In a previous post I was writing about improving working with UIFont and now I’d like to take it one step further in regards with having a quick and easy way to set fonts, if you use a single typeface (font family): extension UIFont { static func regular(_ size: CGFloat) -> UIFont { return .systemFont(ofSize: size, weight: .regular) // Or any other font. } static func medium(_ size: CGFloat) -> UIFont { return .systemFont(ofSize: size, weight: .medium) } } This might not seem much, or maybe I’m just lazy, but I find it easier to write and read let nameLabel = UILabel() nameLabel.font = .regular(15) than ]]>]]> let nameLabel = UILabel() nameLabel.font = .systemFont(ofSize: size, weight: .regular) The other week we released My Travel Stories, an app to journal your travels, share beautiful photos with the world, but also find inspiration from others. There are many apps you could use to journal your travels, be it diary apps, or the stock Photos app; but none of them are a true, focused, travelling journal app. My Travel Stories is a dedicated app, where you can add photos and descriptions for each; nothing more, nothing less. There are also many review sites and many platforms to share you photos/opinions on, but none of them has true human-to-human interactions. At least that’s how I feel. When you write down your thoughts in a journal, you write in a certain way; you write for yourself; you write for your future self; you write with empathy; you create memories, not simple reviews. Being social creatures, I really think it’s much more rewarding to read a story, not a simple review; to experience a memory, not read a simple description about a place; to feel another person’s feelings about a photo. And My Travel Stories offers just that: a way to search places and/or follow others to be up-to-date with their entries. You’ll get a glimpse of other people’s lives, as they want it to be remembered by themselves. 
Travel the world; share your stories; find inspiration; create new memories!]]> A couple of posts ago I was writing about handling the Next button automatically. In this post I’d like to write about avoiding the keyboard automatically, in a manner that provides both a good user experience and a good developer experience. Most apps have some sort of form that requires to be filled, even if just a login/register, if not several. As a user, having the keyboard cover the text field I'm about to fill makes me sad; it's a poor user experience. As developers, we'd like to solve this as easily as possible and have the solution as reusable as possible. What does a good user experience mean? UITextField is brought above the keyboard on focus. UITextField is ”sent back” on dismiss. What does a good developer experience mean? Everything should happen as automatically as possible, so we’ll go with a protocol once again. What does this protocol need to encapsulate? scrollView.contentInset and scrollView.contentOffset in a way that brings the UITextField right above the keyboard. With this in mind, let’s build our protocol: protocol KeyboardListener: AnyObject { // 1 var scrollView: UIScrollView { get } // 2 var contentOffsetPreKeyboardDisplay: CGPoint? { get set } // 3 var contentInsetPreKeyboardDisplay: UIEdgeInsets? { get set } // 4 func keyboardChanged(with notification: Notification) // 5 } We need to constrain this protocol to be conformed to only by classes (1) because we’ll need to modify the two preKeyboard properties (3, 4) — we’ll use them to know how to revert the scrollView’s inset and offset on keyboard dismissal — and we’ll most likely implement this in a UIViewController anyway. The protocol also needs to have a scrollView (2), otherwise this isn’t really … feasible (I guess it could be doable). 
Lastly, we need the method that will handle everything (5), but it just acts as a proxy for two helpers that we’ll implement in just a bit: extension KeyboardListener { func keyboardChanged(with notification: Notification) { guard notification.name == UIResponder.keyboardWillShowNotification, let rawFrameEnd = notification.userInfo?[UIResponder.keyboardFrameEndUserInfoKey], let frameEnd = rawFrameEnd as? CGRect, let duration = notification.userInfo?[UIResponder.keyboardAnimationDurationUserInfoKey] as? TimeInterval else { resetScrollView() // 1 return } if let currentTextField = UIResponder.current as? UITextField { updateContentOffsetOnTextFieldFocus(currentTextField, bottomCoveredArea: frame.height) // 2 } scrollView.contentInset.bottom += frameEnd.height // 3 } } If the notification is not for willShow, or we can not parse the notification’s userInfo, bail out and reset the scrollView. If it is, increase the bottom inset by the keyboard’s height (3). As for (2), we find the current first responder with a little trick to call updateContentOffsetOnTextFieldFocus(_:bottomCoveredArea:) with, but we could also call it from our delegate’s textFieldShouldBeginEditing(_:). The first helper will update our two preKeyboard properties: extension KeyboardListener where Self: UIViewController { // 1 func keyboardChanged(with notification: Notification) { // [...] 
} func updateContentOffsetOnTextFieldFocus(_ textField: UITextField, bottomCoveredArea: CGFloat) { let projectedKeyboardY = view.window!.frame.minY - bottomCoveredArea // 2 if contentInsetPreKeyboardDisplay == nil { // 3 contentInsetPreKeyboardDisplay = scrollView.contentInset } if contentOffsetPreKeyboardDisplay == nil { // 4 contentOffsetPreKeyboardDisplay = scrollView.contentOffset } let textFieldFrameInWindow = view.window!.convert(textField.frame, from: textField.superview) // 5 let bottomLimit = textFieldFrameInWindow.maxY + 10 // 6 guard bottomLimit > projectedKeyboardY else { return } // 7 let delta = projectedKeyboardY - bottomLimit // 8 let newOffset = CGPoint(x: scrollView.contentOffset.x, y: scrollView.contentOffset.y - delta) // 9 scrollView.setContentOffset(newOffset, animated: true) // 10 } } We will now update the protocol extension with a Self: UIViewController constraint (1), because we’ll need access to the window. This shouldn’t be an inconvenience, because this protocol will be most likely used by UIViewControllers, but another approach would be to replace all the view.window occurrences with UIApplication.shared.keyWindow or a variation of UIApplication.shared.windows[yourIndex], in case you have a complex hierarchy. We then calculate the minY for the keyboard (2) — we use a parameter for those cases where we have a custom inputView and we’ll call this from textFieldShouldBeginEditing(_:), for example. We then check if our preKeyboard properties are nil and if they are, we assign the current values from the scrollView (3, 4); they might not be nil if we changed them prior to calling this method. We then convert the textField’s maxY into the window’s coordinates (5) and add 10 to it (6), so we have a small padding between the field and the keyboard. If the bottomLimit is above the keyboard’s minY, do nothing, because the textField is already fully visible (7). 
If the bottomLimit is below the keyboard’s minY, calculate the difference between them (8) so we know how much to scroll the scrollView (9, 10) so that the textField will be visible. The second helper resets our scrollView back to the initial values: extension KeyboardListener where Self: UIViewController { func keyboardChanged(with notification: Notification) { // [...] } func updateContentOffsetOnTextFieldFocus(_ textField: UITextField, bottomCoveredArea: CGFloat) { // [...] } func resetScrollView() { guard // 1 let originalInsets = contentInsetPreKeyboardDisplay, let originalOffset = contentOffsetPreKeyboardDisplay else { return } scrollView.contentInset = originalInsets // 2 scrollView.setContentOffset(originalOffset, animated: true) // 3 contentInsetPreKeyboardDisplay = nil // 4 contentOffsetPreKeyboardDisplay = nil // 5 } } If we have no original insets/offset, do nothing; for example, a hardware keyboard is used (1). If we do, we reset the scrollView to its original, pre-keyboard values (2, 3) and nil-out the preKeyboard properties (4, 5). Using this may vary depending on your needs, but the usual scenario would go like this: final class FormViewController: UIViewController, KeyboardListener { let scrollView = UIScrollView() /* Or if you have a tableView: private let tableView = UITableView() var scrollView: UIScrollView { return tableView } */ // [...] override func viewDidLoad() { super.videDidLoad() NotificationCenter.default.addObserver(forName: UIResponder.keyboardWillShowNotification, object: nil, queue: nil) { [weak self] notification in self?.keyboardChanged(with: notification) } NotificationCenter.default.addObserver(forName: UIResponder.keyboardWillHideNotification, object: nil, queue: nil) { [weak self] notification in self?.keyboardChanged(with: notification) } // And that's it! } // [...] 
} This was a long post, but we now have a nice ”keep the text field above the keyboard” logic and if we implement all of this alongside the automatic Next button handling, it will be like magic for our users. Check out this post about slightly automating this even further, by implementing the Broadcaster/Listener system and moving the observers in the Broadcaster itself. We wouldn’t need to add observers in our view controllers anymore, we’d just have to call Broadcaster.shared.addListener(self). As usual, let me know if there’s anything that can be improved @rolandleth.]]> Say we have a UILabel where we want to display a birthdate with a full format, and an API from where we get a String? with iso8601 format. One of the ways to do this would be: let dateFromAPI: String? // [...] let dateFormatter = DateFormatter.shared // 1 dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZZZZZ" if let dateString = dateFromAPI, // 2 let date = dateFormatter.date(from: dateString) { // 3 dateFormatter.dateFormat = nil dateFormatter.dateStyle = .full dateLabel.text = dateFormatter.string(from: date) // 4 } DateFormatters are expensive to create, so we either create it here once, or use a shared one (1), we then check to see if we have a date from our API (2), then check if we can transform it to a date (3), then finally we can assign it to our dateLabel.text (4). What about flatMap? let dateFromAPI: String? // [...] 
let dateFormatter = DateFormatter.shared dateLabel.text = dateFromAPI .flatMap { // 1 dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZZZZZ" return dateFormatter.date(from: $0) // 2 } .flatMap { // 3 dateFormatter.dateFormat = nil dateFormatter.dateStyle = .full return dateFormatter.string(from: $0) // 4 } If dateFromAPI is not nil, the first flatMap closure will be called (1), where we transform the String to a Date (2), which if it’s not nil (3), the second flatMap closure is called, where we transform the date into a String (4), which will be assigned to dateLabel.text. I’m a bit torn on which one is more readable, but in other scenarios, using the latter might be preferable.]]> Yesterday we released Goalee, an app that helps you not lose sight of your life’s goals. The main idea behind the app is that all the annoyances, conflicts or so-called problems we face in our everyday lives pale in comparison with our true goals in life. The issue I faced, as many others, is that I tend to lose track of what I desire most, exactly because of problems here and there. One approach is to start writing down your goals on a sheet of paper, which I did and it worked; for a while, because I eventually started overlooking said sheet of paper. I then tried using to-do apps, or habit tracking apps for this purpose. But this isn’t something to be checked off, something to record progress of, nor a habit to build; it’s a mindset change, if you will. So I ended up building Goalee to add long/mid-term goals and remind myself about them on a daily/weekly basis. Start your company one day? Become rich? Settle down? Become famous? Whatever it is, don’t lose focus; don’t forget what you desire most; or it will end up forgotten in the sea of daily struggles and it’s not worth it. Hope you never forget your destination, but also enjoy the journey! Below you can find some promo codes (all gone): Cool, let’s dive in and start with the protocol. 
What would we need here?: textFieldand we have empty fields after the current, the return key should be .next(2).: textFieldor all fields after the current one are filled, resign first responder (7).. Update: Shaps wrote a great piece on this topic as well, be sure to check it out!]]> The. I’d like to quickly explain how to let the user pick a photo and automatically extract the location for them. The post is targeted at iOS 11+, because starting with this version, to use an UIImagePickerController we don’t need to ask the user’s permission to access their photo library, because the controller runs as a separate process from the app, which means the app gets read-only access only to the image the user selected, and just to the image — no metadata included. For this, we will need the UIImagePickerController: func pickPhotoFromLibrary() { // 1 guard UIImagePickerController.isSourceTypeAvailable(.photoLibrary) else { return } let imagePicker = UIImagePickerController() imagePicker.delegate = self // 2 imagePicker.sourceType = .photoLibrary // 3 imagePicker.allowsEditing = false // 4 present(imagePicker, animated: true, completion: nil) } First, we need to make sure the type of source we want is available (1), in our case .photoLibrary (2). We’ll then set a delegate where the picker will give us the picked image, set the sourceType (3) and set if the user is allowed to edit the image, or not (4). Next, the bread and butter: // 1 func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) { defer { dismiss(animated: true, completion: nil) } // 2 guard let image = info[UIImagePickerControllerOriginalImage] as? UIImage else { return } // 3 if let asset = info[UIImagePickerControllerPHAsset] as? 
PHAsset, // 4 let location = asset.location { // 5 CLGeocoder().reverseGeocodeLocation(location) { placemarks, error in // 6 guard let placemark = placemarks?.first else { return } // 7 /* Do something with placemark.name placemark.locality placemark.country ... */ } } // Do something with the image. } This is the method the picker will call after the user has selected his image (1). First thing we do is defer the dismissal of the picker, since the system doesn’t do that by itself (2). Next, we try to extract the image from the info dictionary we have received (3). Now, to extract the location, an UIImage is not enough, we need a PHAsset. To receive a PHAsset in the info dictionary, we do need access to the user’s library, which they may decline, so that’s why we’re using an if let here, instead of trying to extract the asset (4) and its location (5) in the guard above (at 3). After we have the CLLocation (5), we need a CLGeocoder that can reverse-geocode the location via an async request (6), which will return an array of CLPlacemarks. Most of the time, this array will contain only one element, unless the address couldn’t be resolved to a single location, which is why we mostly care about the first element in the array (7). There’s a final catch: as we saw above, to actually receive a PHAsset, we need access to the user’s library, so we need to ask for it first. In order to be allowed to ask this, we need to add a message in our Info.plist under the NSPhotoLibraryUsageDescription key. 
Let’s update our pickPhotoFromLibrary method to do that: func pickPhotoFromLibrary() { if PHPhotoLibrary.authorizationStatus() == .notDetermined { // 1 DispatchQueue.main.async { // 2 PHPhotoLibrary.requestAuthorization { _ in // 3 DispatchQueue.main.async { // 4 self.pickPhotoFromLibrary() // 5 } } } return } guard UIImagePickerController.isSourceTypeAvailable(.photoLibrary) else { return } let imagePicker = UIImagePickerController() imagePicker.delegate = self imagePicker.sourceType = .photoLibrary imagePicker.allowsEditing = false present(imagePicker, animated: true, completion: nil) } We first check the current status and if it’s .notDetermined — which means we haven’t asked yet — we ask for authorisation (3). Since this will present an alert, we can be extra safe and do it on the main thread (2). At this point, the alert asking for access to their library will be shown to the user. Since the user can still pick photos even if they decline, we need an explicit message of what’s happening, otherwise the user might get confused, or worse, panic. I went for something along the lines of: ”To automatically determine a photo's location, we need access to your library. You can still pick photos even if you don't allow, but you'll have to manually enter locations.” After the user taps Allow or Don’t allow, the request’s handler is called, at which point we’ll call pickPhotoFromLibrary again (5), also on the main thread (4). At this point, the authorizationStatus will be determined (restricted, denied or authorized) and the UIImagePickerController will be presented. There are a few extra steps for offering this small feature, but I think it’s worth it if you need the location, as it brings a nice touch; for us it was.]]> I recently gave Micro.blog a try and shortly after I thought of importing all my tweets here, because … why not own my content? 
This post will be about extracting and converting your Twitter archive into simpler objects with just text and timestamp — there are many more available fields, but these were the only ones I was particularly interested in. First things first, we need to request your archive: in our Twitter’s profile settings, all the way to the bottom we can find ”Your Tweet archive”; we need to click on ”Request your archive” and after a while we’ll receive an email with a link to the download. The archive contains a single folder with a website structure (so you can open index.html, if you’d like), but we’re only interested in the tweets.csv file at the root, next to index.html. This file has the following structure: "tweet_id","in_reply_to_status_id","in_reply_to_user_id","timestamp","source","text","retweeted_status_id","retweeted_status_user_id","retweeted_status_timestamp","expanded_urls" Twitter will replace any URLs you post with t.co URLs and I, personally, didn’t want to use those. And that’s what expanded_urls are there for: it’s a comma delimited string with the actual URLs for all the t.co URLs in a tweet (if any). Off we go: there’s a nice CSV parser (Thanks, naoty!), which we’ll need to add to the Sources folder of a new Playground and we’ll have to update it to Swift 4 — the compiler will tell us everything that needs to be done. We’ll then add the tweets.csv file to the Playground’s Resources, and we’re good to go. There will be a lot of force unwrapping, but this is intentional, so we can find problems or exceptions and treat them individually. First, a Tweet struct, to hold the final data, and a Syntax enum to dictate what we do with the URLs found in tweets: struct Tweet { let text: String let timestamp: String } enum Syntax { case markdown case html case none } Then, some more preparations: let syntax: Syntax = .markdown // 1 let dataDetector = try! NSDataDetector(types: NSTextCheckingResult.CheckingType.link.rawValue) // 2 let handleRegex = try! 
NSRegularExpression(pattern: "@[^.,:;\\-()\\[\\]{}\\s]+", options: .caseInsensitive) // 3 Since I wanted to write this post and also create a gist about it, I added a property that will determine the syntax of the URLs (1). Then, an NSDataDetector was needed, to find all the t.co URLs and replace them with the actual URLs from expanded_urls. Lastly, we’ll also want to create URLs for all the handles, so an NSRegularExpression is required (3) — _match all strings that start with @ and contain any character, any number of times, except the ones between brackets. Using the library is totally straightforward: let file = Bundle.main.path(forResource: "tweets", ofType: "csv")! // 1 let csv = try! CSV(name: file) // 2 var rawTweets = csv.rows.filter { // 3 // Used a lot of clients throughout the years, each with its own retweeting format ... let isRetweet = $0["retweeted_status_user_id"]?.isEmpty == false || $0["expanded_urls"]?.contains("") == true || $0["expanded_urls"]?.contains("favd.net") == true || $0["text"]?.contains("via @") == true || $0["text"]?.contains("RT @") == true || $0["text"]?.contains("\"@") == true || $0["text"]?.contains("“@") == true || $0["text"] == "." // Don't ask, I have no idea ... let isReply = $0["in_reply_to_status_id"]?.isEmpty == false || $0["text"]?.hasPrefix("@") == true let isLinkToBlog = $0["expanded_urls"]?.contains("rolandleth.com") == true return !isRetweet && !isReply && !isLinkToBlog // 4 } let tweets = rawTweets.map { rawTweet -> Tweet in // 5 // [...] return Tweet(text: text, timestamp: rawTweet["timestamp"]!) // 6 } We create a path to our file (1) and pass it to CSV (2); we then have access to all tweets via the rows property (3). You might want to skip the next part, but for my own purposes, I only cared about tweets (no retweets, no direct tweets and no replies) because that’s the ”standalone” content (4). 
I also didn’t care about any tweets where my blog was mentioned (4), since those are most likely just tweets where I shared posts. The map will create an array of Tweets (5, 6). All the code from this point onward is part of the map, corresponding to a single tweet — it will be easier to go through the code like this. var text = rawTweet["text"]! // 1 if syntax == .markdown { // 2 text = text.replacingOccurrences(of: "\n", with: " \n") } var nsText: NSString { return text as NSString } // 3 var textRange: NSRange { return NSRange(location: 0, length: text.utf16.count) } // 4 let expandedURLs = rawTweet["expanded_urls"]!.components(separatedBy: ",") // 5 let reversedMatches = dataDetector .matches(in: text, options: [], range: textRange) // 6 .reversed() // 7 let matchesCount = reversedMatches.count var nonTcoURLs = 0 // 8 First, we extract our text (1); if the syntax is set to Markdown, we add two whitespaces before any new line (2), since that’s what Markdown requires. Then, instead of creating a Range<String.Index> out of an NSRange, it was easier to just convert our text to NSString (3) — but it has to be a computed property, because we’ll be modifying text. For the range we use a computed NSRange with a length of .utf16.count, because there will be emojis (4)! We extract the expanded_urls into an array of Strings (5), so we can replace the matches found by the dataDetector (6). This will be done in reversed order (7), otherwise replacing the first occurrence would break the ranges of the next occurrences, if any. Since Twitter does not convert URLs that are not ”full” ( rolandleth.com, for example), but NSDataDetector does, we need to make sure we’re properly replacing t.co occurrences — for each tweet, we have to keep track of the number of non- t.co URLs (8), so we can adjust the index when reading from our expandedURLs (explained below). 
reversedMatches.enumerated().forEach { i, m in var url = nsText.substring(with: m.range) // 1 let correctURL: String // 2 if matchesCount > expandedURLs.count, !url.hasPrefix("http") {\(urlName)</a>" // 6 case .none: correctURL = url // 7 } text = nsText.replacingCharacters(in: m.range, with: correctURL) // 8 } We fallback on the original matched URL (1), and if we have more matches than expandedURLs and the current occurrence doesn’t have a prefix of http, we add the prefix ourselves, and increase our nonTcoURLs count (2); otherwise, we replace the t.co occurrence with the corresponding expandedURL, by taking into account the number of nonTcoURLs (3). The reason behind adding the http prefix ourselves is that if we don’t, a server might use these URLs as a dynamic path — for example on, a Markdown URL of example.com would be rendered as. We then remove any http:// or https:// to be used as the URL’s display name (4) and then we can see correctURL’s purpose: to add Markdown syntax (5), HTML syntax (6), or none (7). We then replace the occurrence of the t.co URL with the real one (8). let reversedHandleMatches = handleRegex .matches(in: text, options: [], range: textRange) .reversed() // 1 reversedHandleMatches.forEach { let accountRange = NSRange(location: $0.range.location + 1, length: $0.range.length - 1) let account = nsText.substring(with: accountRange) // 2 let correctHandleURL: String let@\(account)</a>" case .none: correctHandleURL = handleURL } text = nsText.replacingCharacters(in: $0.range, with: correctHandleURL) // 4 } Finally, for the last piece of the puzzle, we will replace all @handles in a tweet by finding all the matches our handleRegex gives us, also in reverse (1). We first extract the handle (2), then we add the correct syntax (3), and we replace the occurrence in the original text with the handle’s URL (4). The whole gist can be found here, and all my tweets can now be found here. 
This was fun to do 😁]]> U.]]> A few months ago I was writing about a new beginning and I mentioned a couple of projects. I mentioned the first one in a previous post and now I'm writing about the second. The main goal of the app is for users to save their fidelity cards digitally (or request new ones), combined with the conveniences of displaying merchants & their offers, and having a shopping list at hand. There's also a friends feature, with which users can share their shopping lists — shopping together, faster and smarter has never been easier! It's targeted at the local, Romanian, market, but it should be possible to adapt it to any market, if the offer and demand dictates so — if you're a seller/merchant and think it would make sense to integrate it on your local market, don't hesitate to contact us. We can display targeted offers, based on the user's shopping lists, but only if the user decides to — it's an opt-in feature, clearly explained — otherwise the offers are random and/or based on position. The app can be found here.]]> 2 years and a half ago, I was writing about how I recently started using Tower — didn't even realize it's been so long. In that post I was explaining how I solved the fact that it doesn't support opening Pull Requests. Well ... The latest version of Tower now supports it, and I gotta say they're as seamless as one would expect! A few new features: Tower has been one of the few apps that I never replaced after starting using it — I wholeheartedly recommend it. 
You can grab the public beta here to give the new features a try — you won't be disappointed!]]> The usual solution to observe and broadcast is to use NotificationCenter: final class Post { // 1 var title: String var body: String init(title: String, body: String) { self.title = title self.body = body } } extension Notification.Name { // 2 static let LTHPostReceived = Notification.Name(rawValue: "com.rolandleth.com.postReceivedNotification") } final class PostCreationController: UIViewController { // 3 private let post: Post // [...] private func savePost() { // 4 // [...] let userInfo = ["post": post] // 5 let notification = Notification(name: .LTHPostReceived, object: nil, userInfo: userInfo) // 6 NotificationCenter.default.post(notification) // 7 } // [...] } final class FeedViewController: UIViewController { // 8 // [...] override func viewDidLoad() { super.viewDidLoad() NotificationCenter.default.addObserver(self, // 9 selector: #selector(postReceived), name: .LTHPostReceived, object: nil) } @objc private func postReceived(from notification: Notification) { // 10 guard let post = notification.userInfo?["post"] as? Post else { return } // 11 // Do something with post. } // [...] } Let's use a Post (1) as an example. First of all, we need a Notification.Name extension (2) to create a custom notification name to pass it around. Next, let's imagine a controller where we create a new post (3): in its save method (4), we have to create a userInfo dictionary (5), a Notification (6) and broadcast it (7). Finally, let's imagine a controller to display a feed of posts (8): we need to add ourselves as an observer somewhere (9) and handle the notification when we receive it (10). The biggest downside here is that we need to try and extract our Post from the userInfo dictionary, found under the post key (which is a plain string, leaving room for errors), and only then can we use it. A lot of boilerplate code, not quite safe and not quite pretty to use. 
I'm sure we can do better, don't you think? Let's start with a broadcaster: final class GlobalBroadcaster { private var listenersTable: NSHashTable<AnyObject> = .weakObjects() // 1 // MARK: - Adding listeners func addListener(_ object: AnyObject) { // 2 listenersTable.add(object) } // MARK: - Helpers private func filteredListeners<T>() -> [T] { // 3 return listenersTable.allObjects.compactMap { $0 as? T } } private func keyboardChanged(with notification: Notification) { // Some keyboard handling logic. } private func setKeyboardObserver() { // 4 NotificationCenter.default.addObserver(forName: .UIKeyboardWillShow, object: nil, queue: nil) { [weak self] notification in self?.keyboardChanged(with: notification) } NotificationCenter.default.addObserver(forName: .UIKeyboardWillHide, object: nil, queue: nil) { [weak self] notification in self?.keyboardChanged(with: notification) } } // MARK: - Init init() { setKeyboardObserver() } // static let shared = GlobalBroadcaster() // 5 } let Broadcaster = GlobalBroadcaster() // 6 The backbone of our broadcaster is the array of listeners (1), backed by an NSHashTable<AnyObject>weakObjects(). An NSHashTable is a collection similar to a Set — we want the objects inside it to be unique — and the .weakObjects initializer means the NSHashTable will store weak references to its contents and no retain cycles will occur—objects will be deallocated properly, instead of being kept alive indefinitely. Next we need a method to add listeners (2), instead of exposing the listenersTable property. When we broadcast something, we will be interested in only one type of listeners, so (3) is a helper to filter only what we need — we'll see in just a bit how this plays out. This approach still lets us use usual NotificationCenter actors (4), but gives us a chance to parse or manipulate objects before exposing them to our app. 
Finally, we'll be creating a global variable, so our Broadcaster can be available everywhere (6); or we can use a static property on GlobalBroadcaster (5), in which case the class itself could be named Broadcaster — I just like to type a bit less. Next up, listeners. How do we listen and broadcast events? With protocols: protocol PostCreationListener { func handlePostCreationBroadcast(with post: Post) } // Just an example. protocol LoginListener { func handleUserLoginBroadcast(with user: User) func handleUserLogoutBroadcast() } final class FeedController: UIViewController { // [...] override func viewDidLoad() { super.viewDidLoad() Broadcaster.addListener(self) // 1 } // [...] } extension FeedController: PostCreationListener { // 2 func handlePostCreationBroadcast(with post: Post) { // 3 // Do something with post. } } We conform FeedController to PostCreationListener (2), add ourselves as a listener (1) and implement the required method (3) — we'll be able to directly use our post, without String keys and casting. Finally, we also need to broadcast events, right? final class GlobalBroadcaster { // [...] func postCreated(_ post: Post) { // 1 let listeners: [PostCreationListener] = filteredListeners() // 2 listeners.forEach { $0.handlePostCreationBroadcast(with: post) // 3 } } // [...] } final class PostCreationController: UIViewController { // [...] private func savePost() { Broadcaster.postCreated(post) // 4 } // [...] } We'll add a new method on our Broadcaster (1) that uses our previously mentioned filter method: since we declare listeners (2) as [PostCreationListener], the compiler can infer the filteredListeners' T return value. We then have to iterate through all listeners and call handlePostCreationBroadcast:. Lastly, postCreated will have to be called from our PostCreationController (4). 
It might seem a bit more code, but we now have type-safety, an easy way to extend our listeners via protocols and a central place where we parse or manipulate objects before exposing them to our app. Let me know what you think @rolandleth.]]> A few months ago I was writing about a new beginning and I mentioned a couple of projects. I would like to present one of them today. It took longer than expected, but we launched today and we're really happy about the results. I'll start with a really short intro: Our lives are filled with habits; some are good, some are bad. Everyone wants to get rid of their bad habits, and we all want to increase the number and/or frequency of good habits; we all want to eat healthier, do more sport, be more productive, or procrastinate less. On the other hand, we humans are, by nature, a really sociable species — so much, I'd argue, that things not only get more enjoyable when in the company of others, but they also get easier. So why not change our habits for the better in a way that we're naturally designed — socially? Some of us have competitive tempers, as well, so how can we ultimately combine all of this, to our advantage? The solution: Tiago Fernandes and Christian Boegner brought me on board, and together we launched ChallengeBeat, the app where you can challenge your friends, or even just yourself, to create — and maintain — the habits you always tried to cultivate. From eating healthier to being more productive, from daring challenges to fun ones, like video games, ChallengeBeat can be your go-to app. Starting the challenge requires just a few steps: set an image, give it a short & descriptive title, a small (or long, where needed) description, set a start and end date, invite some friends and you're good to go! 
Some examples we've run successfully in the past: Not all challenges were successful — we failed quite a few — but with each one that was, we were one step further, had a higher morale and were more confident on that particular topic. We're really happy to announce that the app went live today, and you can download it here. Everything is free, and we want all current features to remain free, forever. If we don't figure something out, at least we'll leave the world a better place — we can't wait to see how people change their lives for the better. In case you like it — and we hope you will — give us a follow and let's change our lives for the better! We're pocketunk@gmail.com, mail@christian-boegner.ch & roland@leth.ro and we'd love a good challenge. In a previous post I talked about a new struct ( LayoutPriority) and a couple of extension methods on NSLayoutConstraint to ease interacting with them. But, as I later discovered, there is no need for the new struct – we can do the same thing on UILayoutPriority itself. Let's quickly see how. 
First, we move all the properties along with the operators to the extension: extension UILayoutPriority { static let minNonZero = UILayoutPriority(rawValue: 1) static let belowDefaultLow = UILayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue - 1) static let defaultLow = UILayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue) static let aboveDefaultLow = UILayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue + 1) static let belowDefaultHigh = UILayoutPriority(rawValue: UILayoutPriority.defaultHigh.rawValue - 1) static let defaultHigh = UILayoutPriority(rawValue: UILayoutPriority.defaultHigh.rawValue) static let aboveDefaultHigh = UILayoutPriority(rawValue: UILayoutPriority.defaultHigh.rawValue + 1) static let maxNonRequired = UILayoutPriority(rawValue: UILayoutPriority.required.rawValue - 1) } extension UILayoutPriority { static func -(lhs: UILayoutPriority, rhs: Float) -> UILayoutPriority { return UILayoutPriority(rawValue: lhs.rawValue - rhs) } static func +(lhs: UILayoutPriority, rhs: Float) -> UILayoutPriority { return UILayoutPriority(rawValue: lhs.rawValue + rhs) } static func -=(lhs: inout UILayoutPriority, rhs: Float) { lhs = UILayoutPriority(rawValue: lhs.rawValue - rhs) } } Finally, we just have to update the methods in NSLayoutConstraint's extension: extension NSLayoutConstraint { func with(priority: UILayoutPriority) -> NSLayoutConstraint { self.priority = priority return self } } Now we have the same functionality as before, without the need of an extra type; and we get to keep the same short syntax, chaining and operators: someView.leadingAnchor .constraint(equalTo: otherView.leadingAnchor) .with(priority: .defaultHigh - 1) .activate() The other advantage to this approach is that setContentCompressionResistancePriority:for: and setContentHuggingPriority:for: are using UILayoutPriority, so we can now pass our new constants, instead of creating new methods for those, or bridging LayoutPriority to UIKit: // Built-in let 
priority = UILayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue - 1) view.setContentCompressionResistancePriority(priority, for: .vertical) // Previous view.setContentCompressionResistancePriority(LayoutPriority.belowDefaultLow.toUIKit, for: .vertical) // New view.setContentCompressionResistancePriority(.belowDefaultLow, for: .vertical) In hindsight, I have no idea why I went that route, instead of this one.]]> I. In Swift 4 UILayoutPriority has become a struct, with an initializer and a rawValue, instead of being a rawValue of Float itself. This means that simple assignments became slightly harder: let constraint = someView.leadingAnchor.constraint(equalTo: otherView.leadingAnchor) // Swift < 4 constraint.priority = 999 // Swift 4 constraint.priority = UILayoutPriority(rawValue: 999) Besides this, I've always had a pet peeve – activating constraints that require priority manipulation: NSLayoutConstraint.activate([ someView.leadingAnchor.constraint(equalTo: otherView.leadingAnchor), someView.trailingAnchor.constraint(equalTo: otherView.trailingAnchor) ]) Setting a priority for the leadingAnchor requires a couple of extra steps: let leadingConstraint = someView.leadingAnchor.constraint(equalTo: otherView.leadingAnchor) leadingConstraint.priority = UILayoutPriority(rawValue: 999) NSLayoutConstraint.activate([ leading, someView.trailingAnchor.constraint(equalTo: otherView.trailingAnchor) ]) What if we could solve these two problems almost in one go? 
At first I thought of creating an enum for this, but I decided to follow Apple's approach and created a struct, so let's do that: struct LayoutPriority { var rawValue: Float var toUIKit: UILayoutPriority { return UILayoutPriority(rawValue: rawValue) } static let minNonZero = LayoutPriority(rawValue: 1) // 1 static let belowDefaultLow = LayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue - 1) // 2 static let defaultLow = LayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue) static let aboveDefaultLow = LayoutPriority(rawValue: UILayoutPriority.defaultLow.rawValue + 1) // 3 static let belowDefaultHigh = LayoutPriority(rawValue: UILayoutPriority.defaultHigh.rawValue - 1) // 4 static let defaultHigh = LayoutPriority(rawValue: UILayoutPriority.defaultHigh.rawValue) static let aboveDefaultHigh = LayoutPriority(rawValue: UILayoutPriority.defaultHigh.rawValue + 1) // 5 static let maxNonRequired = LayoutPriority(rawValue: UILayoutPriority.required.rawValue - 1) // 6 static let required = LayoutPriority(rawValue: UILayoutPriority.required.rawValue) // 7 // MARK: - Init init(rawValue: Float) { self.rawValue = rawValue } } It contains all the values from the UIKit version, and a few convenience ones (1-7), for the most used custom scenarios. For example, if you have two views with equal compressionResistance ( rawValue = 750) and the Auto Layout engine can't solve the constraints, it might require you to change one of the views' compressionResistance to 749 or 751 to break the tie; now we have belowDefaultHigh and belowDefaultLow for that. 
This brings us one step closer to our final goal – less typing, more autocomplete: let constraint = someView.leadingAnchor.constraint(equalTo: otherView.leadingAnchor) constraint.priority = LayoutPriority.maxNonRequired.toUIKit Second step would be to create an NSLayoutConstraint extension: extension NSLayoutConstraint { // This is probably not the best name, since it mutates `self`, but I thought it's better than add/set/change. @discardableResult func with(priority: LayoutPriority) -> NSLayoutConstraint { self.priority = UILayoutPriority(rawValue: priority.rawValue) return self } } And we reached our final goal, of setting priorities directly inline: NSLayoutConstraint.activate([ someView.leadingAnchor.constraint(equalTo: otherView.leadingAnchor) .with(priority: .maxNonRequired), someView.trailingAnchor.constraint(equalTo: otherView.trailingAnchor), .with(priority: LayoutPriority(rawValue: 123)) ]) We can go an extra mile and add a property to the NSLayoutConstraint extension, that can bridge between LayoutProperty and UILayoutProperty: extension NSLayoutConstraint { var layoutPriority: LayoutPriority { get { return LayoutPriority(rawValue: priority.rawValue) } set { priority = UILayoutPriority(rawValue: newValue.rawValue) } } } Although this won't bring that much value, since we have the previous helper function, it does bring some, for example when changing between two layouts, based on a condition: if layout1Condition { someConstraint.layoutPriority = .minNonZero someOtherConstraint.layoutPriority = .maxNonRequired } else { someConstraint.layoutPriority = .maxNonRequired someOtherConstraint.layoutPriority = .minNonZero } Lastly, we can take this one step even further, by adding operators on our LayoutPriority: extension LayoutPriority { static func -(lhs: LayoutPriority, rhs: Float) -> LayoutPriority { return LayoutPriority(rawValue: lhs.rawValue - rhs) } static func -=(lhs: inout LayoutPriority, rhs: Float) { lhs = LayoutPriority(rawValue: lhs.rawValue - rhs) 
Now we can more easily handle constraints dependent on each other:
The last editor I was using, LightPaper, hasn't seen any updates in the past year and a half, which is a bummer, because I really think it has potential, and it's already a pretty good app; it just didn't fit my needs/desires. I know, I'm spoiled when it comes to editors, but I'm also a sucker for clean, dark themes. Recently, I stumbled across Caret, which has everything I look for: in-line Markdown highlighting, syntax assistance, live and customizable (through CSS) preview, an easy-to-the-eyes dark theme and a great, language-aware, syntax theme for code blocks. Check the full features list here, and maybe give the app a try as well. It has some rough edges here and there, but I definitely think it's worth it.]]> Truth)]]> Last */ } I } Say.]]> I.]]> Sitemaps are used by search engines to know what pages to crawl; they're basically a list of all the URLs available on a website. Last time we saw how to manually create an RSS feed by manually creating an XML document, and a sitemap is also just an XML document. First, let's add the route to our droplet: func addRoutes() -> Droplet { get("/sitemap.xml", handler: SitemapController.create) // [...] } As we saw in the previous post, we need a controller that has a method with the same signature as the handler: struct SitemapController { static func create(with request: Request) throws -> ResponseRepresentable { // 1 request.setValue("application/xml", forHTTPHeaderField: "Content-Type") // 2 let posts = try Post.query().sorted().run() } } We start by setting the content type to XML (1) again, and by fetching our posts (2). We won't be checking if there are no posts, because in that case, we will return all the static pages. 
static func create(with request: Request) throws -> ResponseRepresentable { request.setValue("application/xml", forHTTPHeaderField: "Content-Type") // 1 let noPriority = [ "privacy-policy", "feed" ] // 2 let lowPriority = [ "projects/bouncyb", "projects/sosmorse", "projects/iwordjuggle", "projects/carminder" ] // 3 let highPriority = [ "about", "projects", "projects/expenses-planner" ] let posts = try Post.query().sorted().run() let urls = noPriority + lowPriority + highPriority let" // 4 func priority(for url: String) -> Float { if highPriority.contains(url) { return 0.9 } if lowPriority.contains(url) { return 0.3 } return 0.1 } // 5 urls.forEach { xml += "<url>" xml += "<loc>\(root)\($0)</loc>" xml += "<changefreq>yearly</changefreq>" xml += "<priority>\(priority(for: $0))</priority>" // 6 xml += "</url>" } // 7 posts.forEach { xml += "<url>" xml += "<loc>\(root)\($0.link)</loc>" xml += "<changefreq>monthly</changefreq>" xml += "<priority>0.5</priority>" // 8 xml += "<lastmod>\($0.modified)</lastmod>" xml += "</url>" } xml += "</urlset>" // 9 return xml // 10 } A sitemap entry has an optional property called priority, which tells the crawler how important a page is, in comparison with the others. The range for this property is 0-1, with a default value of 0.5. In my particular case, I wanted some pages to be totally unimportant (1), some to have low priority, because they are old projects (2), and some to have high priority (3). We iterate through all URLs (5), and with the help of a function (4), we set its desired priority (6). We then continue by iterating through all posts (7), and this time we set the priority to 0.5 (8), just to be explicit about what's happening: we want all posts to have neutral priority; they are all equally important. Finally, we close the urlset tag (9), and return the xml string (10), wrapping up the sitemap creation. 
Back when the blog was written in Ruby, I wrote that I eventually went ahead and implemented an RSS feed, and that it was much easier than expected. Turns out I didn't even need a library to do it, it's only a matter of creating an XML document. Sure, it might be prettier to assign properties (e.g rss.item.content = "content here") versus creating the document manually (e.g xml += <content>content here</content>), but with proper formatting, the latter can look pretty great too. Besides, printing the document, or looking at the source in a browser will be easier to track / spot bugs. Let's start with adding the route to our droplet: func addRoutes() -> Droplet { get("/feed", handler: FeedController.create) // [...] } Let's now create our Feed controller, that needs to have a method with the same signature as the handler: struct FeedController { static func create(with request: Request) throws -> ResponseRepresentable { // 1 let posts = try Post.makeQuery().sorted().all() guard !posts.isEmpty else { return Response(redirect: "/") } // 2 request.setValue("application/xml", forHTTPHeaderField: "Content-Type") // 3 var\n" xml += "<title type=\"text\">Roland Leth</title>\n" // [...] posts.forEach { xml += "<entry>\n" // 4 xml += "\t<id>\(url)</id>\n" // 5 xml += "\t<title>\($0.title)</title>\n" // [...] xml += "</entry>\n" } xml += "</feed>" return xml // 6 } In here, we have to set the content type to XML (2), fetch the posts (1), and create our document, bit by bit (3), finally returning the xml string (6). This might be a lot of xml += ... typing, but it keeps everything nicely aligned. Also, the hassle of adding tabs (4) and new lines (5) all over the place will pay off when printing / looking at the source in a browser. You can check the whole file here. This is a bit tricky, since my routes for posts and pages are the same, and I differentiate between the two if I can create an Int out of the lastPathComponent. 
I know it's not the best approach, but since URLs should be permanent, I never moved to a /page/x structure. I also kind of dislike that structure ¯\_(ツ)_/¯. In the first post in this series, I briefly presented the Droplet extension, with a very basic addRoutes method, just to present the methods in the extension itself. Let's give it a few routes: extension Droplet { func addRoutes() -> Droplet { get("/feed", handler: FeedController.create) get("/about", handler: AboutController.display) // [...] // Both these methods are resolved by the same method within `PageController`, // but in the first case, a parameter named `id` can also be extracted. get("/", ":id", handler: PageController.display) get("/", handler: PageController.display) return self } } The display(with:) method of our PageController looks like this: import Vapor import HTTP import VaporPostgreSQL struct PageController { // This has the same signature as the `handler` in the Droplet's `get` method static func display(with request: Request) throws -> ResponseRepresentable { let params = request.parameters // If any query params are present (who does that, even?), // just redirect to an URL without them. // If we can convert the `id` parameter to an `Int`, then we are asking for a page. if let page = params["id"]?.int { // If the page is 0, 1, or less than 0, we just redirect to `/`. guard page > 1 else { return request.rootRedirect } guard request.uri.query?.isEmpty != false else { return Response(headers: request.headers, redirect: "/\(page)") } // Return the required page. return try display(page: page, with: request) } // If we can not convert it to an `Int`, but we can convert it to a `String`, it's a post. else if let id = params["id"]?.string { guard request.uri.query?.isEmpty != false else { return Response(headers: request.headers, redirect: "/\(id)") } // Let PostController take it from here. return try PostController.display(with: request, link: id) } // Otherwise we asked for the root. 
else if request.uri.path == "/" { if request.uri.query?.isEmpty == false { return request.rootRedirect } // Return the first page. return try display(page: 1, with: request) } // Let NotFoundController take it from here, which simply displays the `404` leaf. return try NotFoundController.display(with: request) } } The display(page:with:) method doesn't do much, it just creates a dictionary of parameters, passes everything to a ViewRenderer extension method, which, in turn, displays the article-list leaf: struct PageController { // [...] private static func fetchPosts(for page: Int, with request: Request) -> [Post] { let posts = try? Post.makeQuery().sorted().paginated(to: page).all() return posts ?? [] } private static func display(page: Int, with request: Request) throws -> ResponseRepresentable { // If no posts our found, go to `404`. guard case let posts = fetchPosts(for: page, with: request), !posts.isEmpty else { return try NotFoundController.display(with: request) } // We will need the total number of posts, so we can calculate the total number of pages. let totalPosts = try Post.query().count() let params: [String: NodeRepresentable] = [ "title": "Roland Leth", "metadata": "iOS, Ruby, Node and JS projects by Roland Leth.", "root": "/", "page": page ] return try drop.view.showResults(with: params, for: request, posts: posts, totalPosts: totalPosts) } } We proxy through the showResults method, because displaying search results makes use of the same leaf, and requires a set of common parameters, as we can see below: extension ViewRenderer { func showResults(with params: [String: NodeRepresentable], for request: Request, posts: [Post], totalPosts: Int) throws -> ResponseRepresentable { let baseParams: [String: NodeRepresentable] = [ "gap": 2, // This and the one below are required for creating the pagination control. "doubleGap": 4, "posts": posts, // The required posts. 
"pages": Int((Double(totalPosts) / Double(drop.postsPerPage)).rounded(.up)), // The number of pages. "showPagination": totalPosts > drop.postsPerPage // Determines whether we show the navigation control or not. ] let params = params + baseParams return try make("article-list", with: params, for: request) } func make(_ path: String, with params: [String: NodeRepresentable], for request: Request) throws -> View { let footerParams: [String: NodeRepresentable] = [ "quote": quote, "emoji": emoji, "fullRoot": request.domain, "trackingId": drop.production ? "UA-40255117-4" : "UA-40255117-5", "production": drop.production, "year": Calendar.current.component(.year, from: Date()) ] let metadataParams: [String: NodeRepresentable] = [ "path": request.pathWithoutTrailingSlash, "metadata": params["title"] as? String ?? "" // Will be overwritten if it exists in the next step ] let params = footerParams + metadataParams + params return try make(path, params, for: request) } } The make(_:with:for) method in the extension is used throughout the app instead of the default make(_:_:for:) so we can pass common parameters to all pages, required in the head and footer, for example. The PostController is rather short, it just displays the post leaf: import Vapor import HTTP import VaporPostgreSQL struct PostController { private static func fetchPost(with link: String) throws -> Post { // Do a query to fetch all posts mathing the current `link` passed in (our URL's `lastPathComponent`, basically). let query = try Post.makeQuery().filter("link", .equals, link) guard let result = try? query.first(), let post = result else { throw Abort.notFound } return post } static func display(with request: Request, link: String) throws -> ResponseRepresentable { do { let post = try fetchPost(with: link) let params: [String: NodeRepresentable] = [ "title": post.title, "post": post, "singlePost": true] // Using the same extension method mentioned earlier. 
return try drop.view.make("post", with: params, for: request) } catch { // If anything goes wrong, display the `404`. return try NotFoundController.display(with: request) } } } Lastly, you probably wondered about: private static func fetchPosts(for page: Int, with request: Request) -> [Post] { // I do it like this, because I don't want to handle errors, I just care whether it succeeded or not. let posts = try? Post.makeQuery().sorted().paginated(to: page).all() return posts ?? [] } Those are just a couple of Query extensions, where Query is an abstract database query model, much friendlier to reason with than raw queries, and run() does just that, it runs the query: ]]>]]> import Fluent extension Query { func sorted(future: Bool = false) throws -> Query { let q = try self .sort("datetime", .descending) .sort("title", .ascending) // Equivalent to the raw `ORDER BY datetime DESC, title ASC` // This is a flag which I use internally, for when I sometimes write posts with a future date, want to have them synced, // but I don't want them to be displayed yet. // I do want them in the sitemap.xml, though, thus this logic here. if future { return q } return try q.filteredPast() } func paginated(to page: Int) throws -> Query { // Equivalent to the raw `LIMIT \(drop.postsPerPage) OFFSET \(drop.postsPerPage * (page - 1))` return try limit(drop.postsPerPage, offset: drop.postsPerPage * (page - 1)) } func filteredPast() throws -> Query { // Equivalent to the raw `WHERE datetime <= '\(Post.datetime(from: Date())'` return try filter("datetime", .lessThanOrEquals, Post.datetime(from: Date())) } } Let's start by defining our Post model: struct Post { let title: String var rawBody: String { // The original, markdown body. didSet { // For updating body, truncatedBody and readingTime automatically. // didSet doesn't get called on init too, sadly. } } fileprivate(set) var body: String // The html body. var truncatedBody: String // The html body, truncated to x chars. 
fileprivate(set) var readingTime: String let datetime: String // The date, in yyyy-MM-dd-HHmm format. let link: String // The link, created from the title, in post-title format. let date: String // The short date, to be used as subtitle. var modified: String // The last modified date, in datetime format. } I'm not going into details on how body truncation, Markdown -> HTML conversion and reading time work, as they can be found here, and they are pretty nicely documented. Next, making Post database compliant: Vapor contains a framework called Fluent, an ORM tool for a handful of database providers, and we'll use it for talking with PostgreSQL. We have to conform to the NodeInitializable, NodeRepresentable, Preparation and Model protocols: final class Post: NodeInitializable { let storage = Storage() // Indicates if the object was retrieved from the database, or created. // Do not modify it directly. var exists = false // [...] init(row: Row) throws { title = try row.get("title") body = try row.get("body") rawBody = try row.get("rawbody") truncatedBody = try row.get("truncatedbody") datetime = try row.get("datetime") date = try row.get("date") modified = try row.get("modified") link = try row.get("link") readingTime = try row.get("readingtime") } init(node: Node) throws { title = try node.get("title") body = try node.get("body") rawBody = try node.get("rawbody") datetime = try node.get("datetime") modified = try node.get("modified") link = try node.get("link") truncatedBody = try node.get("truncatedbody") readingTime = try node.get("readingtime") date = try node.get("date") } } // Will be in the same file as well, just because it's really short. extension Post: NodeRepresentable { // Helps Fluent save a Post to the database. 
func makeNode(context: Context) throws -> Node { return try Node(node: [ "id": id, "title": title, "body": body, "rawBody": rawBody, "truncatedBody": truncatedBody, "datetime": datetime, "link": link, "readingTime": readingTime, "date": date, "modified": modified] ) } } // Post+Model.swift: extension Post: Model { func makeRow() throws -> Row { var row = Row() try row.set("title", title) try row.set("body", body) try row.set("rawbody", rawBody) try row.set("truncatedbody", truncatedBody) try row.set("datetime", datetime) try row.set("date", date) try row.set("modified", modified) try row.set("link", link) try row.set("readingtime", readingTime) return row } } extension Post: Preparation { // This tells Fluent how the table schema should look like. // These will be all lowercase, because PostgreSQL column names are all lowercase. static func prepare(_ database: Database) throws { try database.create(self) { posts in posts.id() posts.string("title", length: 9_999, optional: false, unique: false, default: nil) posts.string("body", length: 999_999, optional: false, unique: false, default: nil) posts.string("rawbody", length: 999_999, optional: false, unique: false, default: nil) posts.string("truncatedbody", length: 1200, optional: false, unique: false, default: nil) posts.string("datetime", length: 15, optional: false, unique: false, default: nil) posts.string("date", length: 12, optional: false, unique: false, default: nil) posts.string("modified", length: 15, optional: false, unique: false, default: nil) posts.string("link", length: 100, optional: false, unique: true, default: nil) posts.string("readingtime", length: 15, optional: false, unique: false, default: nil) } } // This tells Fluent what reverting means, in our case it will just drop the table. 
static func revert(_ database: Database) throws { try database.delete(self) } } Configuring PostgreSQL is a matter of filling the postgresql.json file, located in Config/secrets: { "host": "127.0.0.1", "user": "roland", // Usually your Mac's username. "password": "", "database": "roland", // Usually your Mac's username. "port": 5432 } Now, making use of PostgreSQL is as easy as: ]]>]]> // Saving: // Has to be a variable, because the save() method is mutating - it updates the id field. var post = Post(title: "Test title", rawBody: "Test body", datetime: "2017-03-09-1550") try post.save() // Fetching: let posts = try Post.all() let firstPost = posts.first try firstPost?.delete() // More complex fetching: let sortedPosts = try Post.makeQuery() .sort("datetime", .descending) .sort("title", .ascending) sortedPosts.forEach { print($0.datetime + ": " + $0.title) } The first post in the Server Side Swift series will be about initializing the project, its structure, and configuring your Droplet. Running the vapor xcode -y command (although I always run swift build first) will download dependencies, build them, configure an Xcode project, and you will end up with a structure like this: ___ Config |___ secrets (optional) - This should be in your .gitignore. |___ production (optional) |_ app.json |_ servers.json ___ Localization - Translation files. |_ xx-YY.json ___ Packages - This is where Vapor installs your packages, and links them in Xcode under the Sources folder group. ___ Public - All files that are public should go here, like downloadable assets, CSS, scripts, etc. |___ images |___ scripts |___ stylesheets |_ favicon.ico ___ Resources - Not really sure what else can go in here except views. |___ Views |___ Partials | |_ article.leaf | |_ footer.leaf | |_ ... other partials. |___ Standalone | |_ iwj.html | |_ ... other standalone pages. |_ about.leaf |_ base.leaf |_ ... other pages. ___ Sources - Not really sure what else can go here except your app files. 
|___ App |___ Controllers | |_ AboutController.swift | |_ SearchController.swift | |_ ... etc. |___ Models | |_ Post.swift | |_ File.swift |___ ... etc. |_ main.swift ___ Tests |___ AppTests |_ PostTests.swift |_ FileTests.swift |_ ... etc. _ Package.swift _ ... misc, like README, etc vapor cleanwill delete everything in your Packagesfolder, in case you'd like to rebuild everything. Config/secretsfolder should contain all your private configurations. Testswill require another vapor xcode -ycall, so the proper target(s) is created. Sourcesgroup will contain all the packages, along with what you put there. Testsfolder will be imported under the same name, but, test targets with the same name will be created as well, so name them accordingly. Finally, let's quickly cover the droplet. The first file you get after creating your project is main.swift, that initializes a Droplet, and offers a route example. I took a different approach, and created two methods in an extension that configure the Droplet, so that I can test them: import Vapor // Be sure to have added a PostgreSQL provider to your Packages: // .Package(url: "", majorVersion: 1) import VaporPostgreSQL extension Droplet { static func setUp() throws -> Droplet { let config = try Config() // [...] Contains more stuff that we'll cover in later posts. config.preparations.append(Post.self) // This tells Vapor that Post is a model. try config.addProvider(PostgreSQLProvider.Provider.self) // This tells Vapor to use PostgreSQL as the database provider. let drop = try Droplet(config: config) // [...] Contains more stuff that we'll cover in later posts. return drop } func addRoutes() -> Droplet { get("/") { request in return "We got steam!" } // [...] Contains more stuff that we'll cover in later posts. return self } } Now we can have a really simple main.swift file, and easier to test code: ]]>]]> let drop = try Droplet.setUp().addRoutes() try drop.run() // returns Never, so we can't chain it above. 
I finally decided to migrate the blog from Node.js to Swift, and that's what I've been working on for the past weeks. It's been fun, and in the upcoming posts I will be writing about how it went. I will prefix them with [SSS], just like I did with [NJS]. The repo can be found here. Hope you'll enjoy them as much as I did!]]> break is used to exit the current scope, for example a for loop: for i in 1...10 { guard i < 9 else { break } // Code here. } But what if we have nested loops, and we want to break a specific one? Swift has us covered, with labels: outer: for i in 1...10 { inner: for j in 1...10 { guard i < 9 else { break outer } guard j < 8 else { break inner } // Code here. } } But today I found something interesting: you can label if blocks, as well: // Works with non optional binding ifs as well. abCheck: if let a = b() { guard a.condition else { break abCheck } // Code here. } This won't be that useful, or used that often, but it's going to be a relief for cases like: ]]>]]> if let a = b() { // Do something here. -> This doesn't let us combine the two ifs into one statement. if let c = a.d() { // Do something else here. } } // Do more stuff here. -> This doesn't let us use a guard above. -> abCheck: if let a = b() { // Do something here. guard let c = a.d() else { break abCheck } // Do something else here. -> We gained an indentation level. } // Do more stuff here. I always found Ruby's ||= operator interesting, which says (roughly) assign the right hand side to the left hand side, if the latter is nil (or false). But what about assigning the right hand side to the left hand side, if the former is not nil? Here's my take on these two, with rather intuitive operators (if you ask me, of course): infix operator ??= // If left is nil, then assign func ??=<T>(lhs: inout T?, rhs: T?) { guard lhs == nil else { return } lhs = rhs } infix operator =?? // Assign if right is not nil func =??<T>(lhs: inout T, rhs: T?) 
{ guard let rhs = rhs else { return } lhs = rhs } Now we can write: ]]>]]> var s1: String? = nil var s2 = "s2" s2 =?? s1 // s1 is nil, nothing happens s1 ??= "first" // s1 is nil, assigns the string to it s2 =?? s1 // s1 isn't nil, assigns s1's value to s2 s1 ??= "second" // s1 isn't nil, nothing happens If you're using WhatsApp, you probably saw the images have a small parallax effect, in the opposite scrolling direction. This gives the impression that the image is on a deeper level, and the "image container" is a window: if you climb on a chair, it's as if you lowered the window (you scrolled down), and now you can see more of the landscape below the window (it scrolled up); and crouching would have the opposite effect. One of the biggest downsides to this, is that you need to force your image to be bigger than what the user sees: if you want to have a +/- 10px movement, you need to make it 20px taller, and wider by an amount that would keep the ratio the same. The first step is to wrap the imageView in a container, so that we can move it up and down. It would have the same size as the original imageView, and clipsToBounds set to true. The second and third steps would be to create a method inside the cell, to be called from the collectionView's scrollViewDidScroll, and the last step is to update the imageView's frame / constraint. We can describe this behavior by splitting it in three positions, and for simplicity's sake, we will assume that we have a fullscreen collectionView: imageViewbe inside its container, with -10pxon top and +10pxat the bottom maxYis equal to the top of the screen, the imageView's top should have 0pxon top and -20pxat the bottom xis equal to the height of the screen, the imageView's top should have 0pxon top and -20pxat the bottom Let's start with the collectionView: func scrollViewDidScroll(_ scrollView: UIScrollView) { collectionView.visibleCells.forEach { ($0 as? 
CustomCell)?.updateImage(in: self.collectionView, in: self.view) } } And continue with the logic from within the cell, where if we had a property like this: private lazy var imageView: UIImageView = { let iv = UIImageView(image: UIImage(named: "landscape")) self.contentView.addSubview(iv) iv.translatesAutoresizingMaskIntoConstraints = false NSLayoutConstraint.activate([ iv.topAnchor.constraint(equalTo: self.contentView.topAnchor, iv.bottomAnchor.constraint(equalTo: self.contentView.bottomAnchor, constant: -60, // To accomodate some labels below. iv.leadingAnchor.constraint(equalTo: self.contentView.leadingAnchor, iv.trailingAnchor.constraint(equalTo: self.contentView.trailingAnchor)] ) return iv }() We would need to make a few changes, that we'll break down right after: private var imageViewTop: NSLayoutConstraint? = nil // 1 private lazy var imageView: UIImageView = { let container = UIView() self.contentView.addSubview(container) // 2 container.translatesAutoresizingMaskIntoConstraints = false NSLayoutConstraint.activate([ // 3 container.topAnchor.constraint(equalTo: self.contentView.topAnchor, container.bottomAnchor.constraint(equalTo: self.contentView.bottomAnchor, constant: -60, container.leadingAnchor.constraint(equalTo: self.contentView.leadingAnchor), container.trailingAnchor.constraint(equalTo: self.contentView.trailingAnchor)] ) let iv = UIImageView(image: UIImage(named: "landscape")) container.addSubview(iv) // 4 iv.translatesAutoresizingMaskIntoConstraints = false self.imageViewTop = iv.topAnchor.constraint(equalTo: container.topAnchor) // Its constant doesn't matter right now, // because it will get updated instantly by the call from scrollViewDidScroll. 
NSLayoutConstraint.activate([ // 5 imageViewTop!, iv.heightAnchor.constraint(equalTo: container.heightAnchor, constant: 20), iv.centerXAnchor.constraint(equalTo: container.centerXAnchor)] // 6 ) return iv // 7 }() We need a new property for its topAnchor constraint (1) since that's what we'll be using to create the parallax effect, add the imageView to a container (4) and add that to our contentView (2), add the previous constraints to the container (3), and align our imageView with the container (5). Since we're increasing the height by 20 for the parallax, we would also need to increase the width in such a manner to preserve the image's ratio; then we'd have to divide that value by 2, and use it as a constant for the imageView's leading and trailing constraints. But, instead of complicating things like this, we're actually using centerXAnchor (6), because the width will be automatically set based on its intrinsic size. We will still return the imageView, so it can be easier to reason with, to set its image, for example. We can always and safely access the container via imageView.superview! if needed, since we're sure it exists. Lastly, where the real magic happens, the updateImage method: func updateImage(in collectionView: UICollectionView, in view: UIView) { let rect = collectionView.convert(frame, to: view) // 1 // We have some labels below the imageView. let containerMaxY = rect.maxY - 60 let topInset = collectionView.contentInset.top // 2 let bottomInset = collectionView.contentInset.bottom // 3 let parallaxRatio = (containerMaxY - topInset) / (view.height + containerHeight - topInset - bottomInset) // 4 imageViewTop?.constant = -20 * min(1, max(0, parallaxRatio)) // 5 } Let's break all of this down: We first need the cell's rect in the collectionView's containing view coordinates (1).
Our scenario is easy, with the labels' height known at 60, but we could also have a more complex scenario, with different heights for portrait / landscape; in that case we'd have to jump through a couple more steps to find containerMaxY: let containerHeight = imageView.superview!.frame.height let labelsHeight = rect.height - containerHeight let containerMaxY = rect.maxY - labelsHeight The three positions we used in the example at the start were for a fullscreen collectionView, but we might also need to take into consideration its insets (2, 3), because we want to finish the parallax the moment the cell is not visible anymore, for the most accurate effect. Our three positions translate into these values: top - containerMaxY == topInset, parallaxRatio == 0- the image is at its lowest point bottom - containerMaxY == view.height + containerHeight - bottomInset, parallaxRatio == 1- the image is at its highest point center - containerMaxY == (view.height - bottomInset) * 0.5 + containerHeight * 0.5, parallaxRatio == 0.5- the image is centered For the bottom of the imageView we only take into consideration the top inset (4), because that's the only one affecting its final value in relation with the parent view. For the bottom of the parent view we take into consideration both the top inset and the bottom inset (4), because both affect the final value (the point where the image is leaving the visual field). Since collectionView.visibleCells also returns cells that are below the top / bottom bars, even though they're not visible to the user, it's still good practice to limit the parallax values (5). As always, I'd be more than happy to hear your feedback and discuss @rolandleth.]]> Recently,.]]> I.]]> Working with Core Data is getting easier and easier, but there are a couple of improvements I'd like to talk about, and I'd like to start with the auto-generated, generic NSFetchRequest. 
It's a step in the right direction, but the problem is that trying to use it without explicitly declaring its type won't work: class ProductModel: NSManagedObject { } let request = ProductModel.fetchRequest() // <- Ambiguous use of fetchRequest(). let request1: NSFetchRequest<ProductModel> = ProductModel.fetchRequest() // <- Works properly. Update, Oct 10, 2017: It has been solved, all of this is now redundant. Yay! I do hope it's just a matter of time until it's solved, but in the meantime, I thought it can be improved a bit, by making use of protocols: protocol CoreModel { associatedtype Entity: NSManagedObject } extension CoreModel { private static var name: String { return String(describing: Entity.self) } static var request: NSFetchRequest<Entity> { return NSFetchRequest<Entity>(entityName: name) } } We have a computed name, and we use that to compute a generic NSFetchRequest, of the required type. We can now make use of this like so: class ProductModel: CoreModel { typealias Entity = ProductModel } [...] let request = ProductModel.request // This is an NSFetchRequest<ProductModel> let results = try context.fetch(request) // This would properly return [ProductModel] There's one more improvement I'd like to mention, and it's related to the insertion of a model. When we fetch some data over the network we should first verify if it exists locally, and update that instead of trying to create a new one directly: extension CoreModel { static func create(in context: NSManagedObjectContext = theMainContext, id: String) -> Entity { let request = self.request request.fetchLimit = 1 request.predicate = NSPredicate(format: "id == %@", id) if let result = (try? context.fetch(request))?.first { return result } let entity = NSEntityDescription.insertNewObject(forEntityName: name, into: context) as! Entity entity.setValue(id, forKey: "id") return entity } } [...] let product = ProductModel.create(id: "1") // product.id = "1" -> This can be omitted. product.title = "Bike" [...]
// Save the context. Here, we make use of the aforementioned generic request, set its limit to 1, since an id should be unique, and we check if it returns a result. If it does, we return it, otherwise we insert a new one, set the value of its id field to the passed in value. Note: it's usually not a good practice to handle Core Data via setValue(_:forKey:), but use the model's properties instead; here, though, we can be quite sure that we won't misspell id, and we can also save a line of code when calling create. Let me know @rolandleth if there's anything that can be improved, I'd love to chat about it.]]> I was in need of a radio button recently, and it had to be designed like a Google Material Design radio button. Nothing too fancy, but I wanted to spice it up a little bit, by adding a nice animation. The idea was to: This is the result: The library repo can be found here, and its CocoaPods page here. It can be installed either by dragging the contents of the source folder to your Xcode project, either by adding pod 'LTHRadioButton' to your Podfile. It's pretty straightforward to use: selectedColorand deselectedColorhave been made publicly customizable for cases like a tableViewwith alternating row and radio colors, where the tableViewmight dequeue a cell with one color for displaying a cell with a different color. isSelected- Indicates whether the radio button is selected. ]]>]]> init(diameter: CGFloat = 18, selectedColor: UIColor? = nil, deselectedColor: UIColor? = nil) // Colors default internally if nil. func select(animated: Bool = true) // Selects the radio button. func deselect(animated: Bool = true) // Deselects the radio button. Say we have a composed string that looks like this: let date = "22 July, 2017" let value = "€ 148" let quantity = 5 let string = "\(quantity) of your items, in value of \(value), have been delivered on \(date)." // 5 of your items, in value of € 148, have been delivered on 22 July, 2017. 
It would be nice to emphasize the important bits, and the usual approach would be to create an NSMutableAttributedString, and to add the required attributes: let attributedString = NSMutableAttributedString( string: string, attributes: [ .font: UIFont.systemFont(ofSize: 14), .foregroundColor: UIColor.gray ] ) attributedString.addAttributes( [ .foregroundColor: UIColor.darkGray, .font: UIFont.boldSystemFont(ofSize: 15) ], range: (string as NSString).range(of: date) ) attributedString.addAttributes( [ .foregroundColor: UIColor.darkGray, .font: UIFont.boldSystemFont(ofSize: 15) ], range: (string as NSString).range(of: value) ) attributedString.addAttributes( [ .foregroundColor: UIColor.darkGray, .font: UIFont.boldSystemFont(ofSize: 15) ], range: (string as NSString).range(of: String(quantity)) ) That's a bit of a mouthful, if you ask me. forEach to the rescue! let attributedString = NSMutableAttributedString( string: string, attributes: [ .font: UIFont.systemFont(ofSize: 14), .foregroundColor: UIColor.gray ] ) [date, value, String(quantity)].forEach { attributedString.addAttributes( [ .foregroundColor: UIColor.darkGray, .font: UIFont.boldSystemFont(ofSize: 15) ], range: (string as NSString).range(of: $0) ) } Slightly more compact, and a bit easier to scan. On a side note, I find it funny how NS(Mutable)AttributedString requires a String, and won't accept an NSString to init, but addAttribute(s) requires an NSRange. ¯\_(ツ)_/¯ Let's say we have a controller that can fetch some data. What would this imply? A loading spinner, the fetching of the data and the update of the UI. We can create a protocol for this, maybe Fetchable: protocol Fetchable { func showSpinner() func fetchData() func updateUI() } [...] class Controller: UIViewController, Fetchable { // showSpinner, fetchData and updateUI are required.
} But showing a spinner and updating the UI could be useful by themselves, so we could extract those into separate protocols: protocol Loadable { func showSpinner() } protocol Fetchable { func fetchData() } protocol UIUpdateable { func updateUI() } [...] class Controller: UIViewController, Loadable, Fetchable, UIUpdateable { // showSpinner, fetchData and updateUI are required } But instead of conforming to all three, we can declare Fetchable as conforming to the other two: protocol Fetchable: Loadable, UIUpdateable { func fetchData() } [...] // This is now the equivalent of the above, when we conformed to all three, individually. class Controller: UIViewController, Fetchable { // showSpinner, fetchData and updateUI are required } The advantage is that we can now conform to each protocol individually, and when Fetchable is required, there's slightly less typing. As always, feel free to drop by @rolandleth, I'd love to chat about it. I recently had this problem: at the start of the app there's a call to fetch some reference data, on which other calls depend, but it shouldn't hinder the app launch itself, nor anything else that doesn't depend on it. So, after several approaches, I decided to use DispatchGroups. 
First, a struct to abstract a DispatchQueue and a DispatchGroup: struct Queue { let group = DispatchGroup() let queue: DispatchQueue init(label: String) { queue = DispatchQueue(label: label) } } This struct should have a way to add a closure to the queue, that won't require waiting, basically just abstracting async(execute:): func add(_ closure: @escaping () -> Void) { queue.async(execute: closure) } And also a way to add a closure to the queue, that will require waiting: func addAndWait(_ closure: @escaping () -> Void) { // Dispatch on our queue queue.async { self.group.enter() // Fire up the closure closure() // And wait for the leave call _ = self.group.wait(timeout: DispatchTime.distantFuture) } } func advance() { // Any form of synonym for continue, I guess group.leave() } Using it feels rather straightforward: let queue = Queue(label: "com.rolandleth.demoapp.loadQueue") queue.addAndWait { API.fetchReferenceData { _ in print("fetched 1") queue.advance() } } queue.add { API.fetchResource1 { _ in print("fetched 2") } } queue.addAndWait { self.timeConsumingJob() print("1") queue.advance() } queue.add { print("2") } queue.addAndWait { API.fetchResource2 { _ in print("fetched 3") queue.advance() } } queue.add { self.jobDependantOnResource2() print("3") } This would print out: fetched 1 fetched 2 1 2 fetched 3 3 // Or if timeConsumingJob() finishes faster than fetchResource1(): fetched 1 1 2 fetched 2 fetched 3 3 I'm sure this isn't perfect, and won't work for everyone, but it should serve some purpose, nonetheless. DispatchSemaphore would have also worked, and so would have subclassing NSOperationQueue, but I found this shorter and easier to abstract. One could improve this by passing a custom timeout and a custom DispatchQoS, for example. Last post in the series is about creating a new post.
This wasn't really needed, since the whole point of having my blog in Dropbox was to use any markdown editor I desire, save the file, run the sync command, and have it live, but for the sake of learning, I went ahead and added this feature too. First, the routes: router.get("/create/" + process.env.MY_SYNC_KEY, function(req, res) { res.render("create-post", { title: "Create", metadata: "Create a post" }) }) router.post("/create", function(req, res) { if (req.params.token != process.env.MY_SYNC_KEY) { res.redirect("/"); return } const body = req.body // Create the file first, if it doesn't work, stay on the page. Dropbox.createFile(body).then(function(data) { if (!data) { res.end(); return } res.redirect("/cmd.sync/" + process.env.MY_SYNC_KEY) }) }) Then the createFile function: const request = require("request") Dropbox.createFile = function(body) { return new Promise(function(resolve) { const fileContents = body.title + "\n\n" + body.body const fileName = "posts/" + body.datetime + "-" + body.title + ".md" const options = { method: "PUT", url: "" + fileName + "?overwrite", headers: { "Content-Type": "text/plain", Authorization: "Bearer " + process.env.DB_ACCESS_TOKEN }, body: fileContents } request(options, function(err) { if (err) { console.log("Creating " + fileName + " failed.") } else { resolve(true) } }) }) } And lastly, the view is just a <form method="post" action="/create"> with some text inputs and an <input class="button" type="submit">. I hope I managed to explain things properly throughout the series, I hope I haven't forgotten anything, and if there's anything to fix or improve, don't hesitate to contact me; I'd be more than glad to discuss about anything.]]> For Dropbox handling I chose a pretty small library, node-dropbox. To use it, I went in Dropbox's developer dashboard and created an access token (instead of using secrets and keys) and saved that in my .env. 
Then onto the helper: const dropbox = require('node-dropbox').api(process.env.ACCESS_TOKEN) function Dropbox() {} Dropbox.getFolder = function(path) { return new Promise(function(resolve) { // This will get all the metadata for each file from the folder. dropbox.getMetadata(path, function(error, result, folder) { if (error) { console.log('Getting ' + path + ' failed.') } else { resolve(folder) } }) }) } Dropbox.getFile = function(path) { return new Promise(function(resolve) { // This will return the contents of a file. dropbox.getFile(path, function(error, result, file) { if (error) { console.log('Getting ' + path + ' failed.') } else { resolve(file) } }) }) } Lastly, we will need three new db methods that we'll use when syncing: Db.updatePost = function(post) { const query = 'UPDATE posts SET' + ' ' + fields() + ' = ' + values(post) + ' WHERE ' + ' link = \'' + post.link + '\' AND ' + ' datetime = \'' + post.datetime + '\'' // Connect and run the query }) Db.insertPost = function(post) { const query = 'INSERT INTO posts ' + fields() + ' VALUES ' + values(post) // Connect and run the query }) Db.delete = function(post) { const query = 'DELETE FROM posts WHERE' + ' link = \'' + post.link + '\' AND' + ' datetime = \'' + post.datetime + '\'' // Connect and run the query }) The fields and values methods are just helpers for building the query, one for returning the fields, one for creating the values: function fields() { return '(title, body, datetime, modified, link, readingtime)' } function values(post) { // Escape all ' in the title and body by adding another one const title = post.title.replace(new RegExp('\'', 'g'), '\'\'') const body = post.body.replace(new RegExp('\'', 'g'), '\'\'') return '(\'' + title + '\', ' + '\'' + body + '\', ' + '\'' + post.datetime + '\', ' + '\'' + post.modified + '\', ' + '\'' + post.link + '\', ' + '\'' + post.readingTime + '\')' // This will return a string similar to fields: ('A title', 'A body', '2016-07-22-2148', 'a-title', '2 
min') } And these two come together in the sync route: // Force means update even if the date of the file is the same as the database post's // shouldDelete means files have been deleted and the respective posts need to be deleted // This is because for deleting there's an extra couple of iterations over all posts, and takes a bit longer router.get('/' + process.env.MY_SYNC_KEY + '/:key1?/:key2?', function(req, res) { const shouldDelete = req.params.key1 == 'delete' || req.params.key2 == 'delete' const forced = req.params.key1 == 'force' || req.params.key2 == 'force' This will wait for both promises to finish, then return them in an array: Promise.all([Db.fetchPostsForUpdating(), Dropbox.getFolder('/posts')]).then(function(data) { let posts = data[0].posts const folder = data[1] Since Dropbox.getFile is async, and wrapped in a promise, we need to wait for all of them to finish. Luckily, bluebird (Promise) has a nice little helper, map, which will do just that; we just have to return a Promise: Promise.map(folder.contents, function(item) { return Promise.join(item, Dropbox.getFile(item.path), function(item, file) { return { item: item, file: file } }) This will return an array of promises containing the item and the file contents, so we map once more, to eventually create an array of }).map(function(dropboxData) { const item = dropboxData.item const file = dropboxData.file First, we create a safe-ish link out of the file name: const matches = item.path.match(/\/(posts)\/(\d{4})-(\d{2})-(\d{2})-(\d{4})-([\w\s\.\/\}\{\[\]_#&@$:"';,!=\?\+\*\-\)\(]+)\.md$/) const datetime = matches[2] + '-' + matches[3] + '-' + matches[4] + '-' + matches[5] let link = matches[6].replace(/([#,;!:"\'\?\[\]\{\}\(\$\/)]+)/g, '') .replace(/&/g, 'and') .replace(/\s|\./g, '-') .toLowerCase() Then we create the title from the first line, delete the second line and create the body out of the rest by transforming the markdown syntax in html with marked, save the last modified time in modified and 
return the Post object: let lines = file.toString().split('\n') const title = lines[0] lines.splice(0, 2) const body = lines.join('\n') const modified = item.client_mtime return new Post(title, marked(body), Post.readingTime(body), datetime, modified, link) We now have an array of Posts that represent the data in Dropbox ( newPosts), and an array of Posts that represent the data in the database ( }).then(function(newPosts) { if (shouldDelete) { // Iterate through existing posts, and if no corresponding // file is found, delete the post, and remove it from the data. data[0].posts.forEach(function(post, index) { let matchingNewPosts = newPosts.filter(function(newPost) { return Post.linksMatch(newPost, post) && newPost.datetime == post.datetime }) if (matchingNewPosts.length) { return } // We don't need to wait for this to finish Db.deletePost(post) // After the iteration is finished, posts will represent the data in the database // with the extra posts removed. posts.splice(index, 1) }) } Quick explanation: if a Dropbox post has the same link and the same datetimeas a database post , it means it's the same entity, since these fields are created out of filenames, which are unique; if they have the same link, but different datetimes, then they are different entities. 
We will now iterate through the Dropbox posts, find the ones with matching links, and if none is found, we create it, but since we want to wait for all posts to be created, we will use bluebird's map again, and return an object that contains the matching posts (or empty array), and the newPost: Promise.map(newPosts, function(newPost) { // Just the one(s) with the same link let matchingPosts = posts.filter(function(p) { return Post.linksMatch(newPost, p) }) let returnObject = { newPost: newPost, matchingPosts: matchingPosts } // Create if (matchingPosts.length == 0) { Db.createPost(newPost) } return returnObject Quick reminder that when map finishes, we will have an array of objects: }).each(function(data) { const newPost = data.newPost const matchingPosts = data.matchingPosts Inside map we created new Posts if no matches were found. Here, if we have one or more matches, we will update the ones with matching datetime: matchingPosts.forEach(function(matchingPost) { // Update if (newPost.datetime == matchingPost.datetime) { // Only if these differ, no reason to query the db for nothing if (newPost.modified != matchingPost.modified || forced) { Db.updatePost(newPost) } return } If any of the matches have non-matching datetimes, that means it's just another post, created by mistake with the same name, so we will append --1 to it, or if that exists --2, and so on: let variant // Create a new one, with same link, but duplicated. // If it has --1 already, make it --2, and so on. 
if (matchingPost.link.slice(-3, -1) == '--') { variant = parseInt(matchingPost.link.slice(-1)[0]) } else if (matchingPost.link.slice(-4, -2) == '--') { variant = parseInt(matchingPost.link.slice(-2)) } else { variant = 0 } variant += 1 newPost.link += '--' + variant Db.createPost(newPost) }) When everything is finished, we just redirect back to the homepage: }).then(function() { res.redirect('/') }) }) }).catch(function(error) { console.log(error) }) }) Hopefully I managed to split and explain the code properly. If I didn't, don't hesitate to drop by and ask questions, or give any kind of feedback @rolandleth.]]> Last time we created a basic database handling functionality, but it was clear we can improve it: posts aren't the only ones that need fetching, and requiring the dbconfig file everywhere (and configuring the object) will become cumbersome. So, a first step would be to create specialized DbConfig objects: DbConfig.page = function(page) { const config = new DbConfig() config.offset = config.limit * (page - 1) return config } DbConfig.post = function(link) { const config = new DbConfig() config.fields = ['link'] config.fieldValues = [link] config.limit = 1 return config } // And so on, for archive, sitemap, search and feed and update Then, the next and final step would be to create Db helper methods. For example, fetching one post would go from this: const DbConfig = require('../models/dbconfig') const config = DbConfig.post config.fields = ['link'] config.fieldValues = [url] config.limit = 1 Db.fetchPosts(config).then(function(data) { // do stuff }) To this: // Inside db.js Db.fetchPost = function(link) { return Db.fetchPosts(DbConfig.post(link)) } // Somewhere where a post is needed Db.fetchPost(url).then(function(data) { // do stuff }) Each fetch method is currently used only once, and all this might not seem much, but dbconfig is now required only inside db, and all variations of DbConfig are easily found in one place. 
Also, if one of the fetch methods will be used somewhere else, no code will be duplicated. Last time I was talking about the DbConfig model and the fetchPosts function, so let's dive in. First, a few examples of using the config: const Db = require('../lib/db') const DbConfig = require('../models/dbconfig') // Fetching one post const config = new DbConfig() config.fields = ['link'] config.fieldValues = [req.baseUrl.substring(1)] // The path without the initial `/` config.limit = 1 // Search, meaning to fetch based on a query const config = new DbConfig() config.searching = true // Special logic for creating the query command config.limit = 0 // All config.fields = ['body', 'title'] // Create an array of strings with matches between quotes as single items, // and matches outside quotes as multiple items // "one item" several items -> ['one item', 'several', 'items'] config.fieldValues = req.query.query .match(/\"(.*?)\"|(\w+)/g) .map(function(match) { return match.replace(/"/g, '') }) // Archive, meaning to fetch all posts, but only the title, link and datetime to group them const config = new DbConfig() config.limit = 0 config.columns = 'title, link, datetime' // Not an array, like fields and fieldValues, // because they go together, anyway: 'SELECT title, link, datetime FROM posts' As for the fetchPosts, it's a bit tricky, but not that bad. 
First, let's introduce a new object, Result, to use as the result for fetching posts: DbResult = function() { this.posts = [] this.totalPosts = 0 } And then, the main course: Db.fetchPosts = function(config) { // As I said, columns is used in one go let query = 'SELECT ' + config.columns + ' FROM posts' // [...] if (config.searching) { // Wrap every match in a highlight mark body = body.replace(searchRegExp, '<mark class=\'search\'>$&</mark>' ) let lines = [] // Split the body into lines body.split('\n').forEach(function(line) { let _line = line // Search for hrefs, asset links and images if (line.indexOf('<a href=') || line.indexOf('/assets/') || line.indexOf('<img src=')) { // And remove the occurrences of the search marks on that line, // otherwise links and images will break _line = _line.replace(/<\/mark>/g, '') _line = _line.replace(/<mark class='search'>/g, '') } lines.push(_line) }) // Then join the lines again body = lines.join('\n') } return new Post( rawPost.title, body, rawPost.readingtime, rawPost.datetime, rawPost.modified || '', rawPost.link ) }) resolve(res) }) }) }) } As for using it, here's my post route: const router = require('express').Router() const NotFound = require('./not-found') const Db = require('../lib/db') // This resolves to router.get('/', function(req, res) { const config = new DbConfig() // Query by the link field config.fields = ['link'] // Use the path without the starting '/' as the value config.fieldValues = [req.baseUrl.substring(1)] // Fetch only 1 config.limit = 1 Db.fetchPosts(config).then(function(data) { if (data.posts.length == 0) { // Since I need this in several places, I just created a helper to render the 404 page NotFound.show(res) return } // Get the first post and render it const post = data.posts[0] res.render('index', { posts: data.posts, // Since it's the same partial for one post, or several, it expects an array title: post.title, page: 1, totalPosts: 1, metadata: post.title }) }).catch(function() { NotFound.show(res) }) }) I'm sure I could improve a lot of code, so if you have any suggestions (or questions), I'd be more than happy to hear from you @rolandleth.]]> With Sinatra, I was 
using DataMapper as ORM (Object Relational Mapper) for Postgres, and I got a bit spoiled, because it makes things really easy. You first have to define your data mapping: class Post include DataMapper::Resource # Set the number of characters for these types of fields, if the DB supports it DataMapper::Property::String.length(255) DataMapper::Property::Text.length(999999) property :id, Serial property :title, Text property :body, Text property :datetime, String property :modified, String property :link, String end And using it is really straightforward: Post.first # or with some fields as parameters, to narrow the query down Post.first(link: 'node-js') Post.create # Taking all fields as parameters post = Post.first post.destroy # or post.update # Taking the fields that need updating as parameters Now, there are ORMs for Node.js, like Sequelize, but I went with a direct approach instead. First, I created a Post model: function Post(title = '', body = '', readingTime = '', datetime = '', modified = '', link = '') { this.title = title this.body = body this.readingTime = readingTime this.datetime = datetime this.date = Post.dateFromDateTime(datetime) || '' this.modified = modified this.link = link } module.exports = Post // Somewhere else in the app: const post = new Post('Title') post.body = 'Body' ... And a database manager, that will use the pg package: const pg = require('pg') function Db() {} // Static functions Db.createPosts = function(param1, param2) { } Db.updatePost = function(param1, param2) { } Db.deletePost = function(param1, param2) { } Db.fetchPosts = function(param1, param2) { } module.exports = Db But it's already obvious we could improve upon the params of these functions, since they're usually the same, and, in time, some scenarios will surely end up needing more params that others will not. 
The answer was to create a config model, which will have default values for its properties: DbConfig = function() { this.fields = null // The fields to query by this.fieldValues = null // The values to query by this.orderBy = 'datetime' // The field to order by this.orderDirection = 'ASC' // The direction to order by this.limit = process.env.PAGE_SIZE || 10 // The number of items to return // We'll see more here in future posts } Now, the fetchPosts will look like this: Db.fetchPosts = function(config) { // We'll see later exactly what goes in here, but generally // config.param1, config.param2 are used instead of param1 and param2 } And calling it will look like this: const Db = require('../lib/db') const DbConfig = require('../models/dbconfig') const config = new DbConfig() config.fields = ['link'] config.fieldValues = ['node-js'] config.orderBy = 'title' Db.fetchPosts(config) ... So, what about the actual implementation of fetchPosts? Db.fetchPosts = function(config, completion) { let query = 'SELECT'... pg.connect(dbURL, function(err, client, done) { client.query(query, function(err, result) { done() // Do stuff with result completion(result / processedResult, err) }) }) } // Using it Db.fetchPosts(config, function(result, err) { if (err) { // handle failure return } // Do stuff with result }) While there's not much value in using Promises here, I still decided to do it, at least for learning purposes. I would do them injustice if I tried to go in depth about them, so I suggest reading about them a bit, if required, but the core idea behind promises is that a promise represents the result of an asynchronous operation. So, the above code would turn into: Db.fetchPosts = function(config) { return new Promise(function(resolve, reject) { let query = 'SELECT'... 
pg.connect(dbURL, function(err, client, done) { client.query(query, function(err, result) { done() // Do stuff with result if (err) { reject (err) } else { resolve(result / processedResult) } }) }) }) } // Using it Db.fetchPosts(config).then(function(result) { // Do stuff with result }).catch(function(err) { // Handle failure }) // Promises would have really shined if we were to chain several then blocks before the final catch Next time I will dive into the fetchPosts function, its uses, and how the Config model turned out to be really helpful. In the previous posts I mentioned file locations, but I haven't been explicit on what my structure is, so let's go through it: - assets |___ javascripts (3rd party js) |___ stylesheets |___ images |___ files (random files I want to serve, like my resume) - lib (let's call them helpers) |___ tasks (anything I want to run on Heroku) |_ db.js (for communicating with Postgres) |_ dropbox.js (for communicating with Dropbox APIs) |_ feed.js (just one function to create the feed.xml) |_ sitemap.js (just one function to create the sitemap.xml) - models |_ post.js (the post model and its helpers) |_ dbResult.js |_ dbConfig.js - public (public files) |_ robot.txt |_ humans.txt |_ favico.ico - routes (for express, at least, these can also be considered controllers) |___ projects |___ all.js |___ my.js |___ projects.js |_ article.js |_ page.js |_ routes.js (to keep all routes in one file) |_ etc.js - views |___ partials |_ header.ejs |_ footer.ejs |_ etc.ejs |_ about.ejs |_ index.ejs (the main page) |_ layout.ejs (the layout mentioned in one of the previous posts) |_ etc.ejs It might not be the best structure, but it made sense to me. As a layout tip: when you're creating partials, and your views' structure, try to do it from your code's perspective. For example the search page is a partial, and the main page is created from search + page navigation: // Main page. 
<% include ./partials/search %> <% if (totalPages > 1) { %> <% include ./partials/page-navigation %> <% } %> It might seem counterintuitive, but the thing is that the search page is just the main page, but without page navigation, so it makes sense to compose the latter from the former: // Search partial. <% layout('layout') %> <% block('head').append('<link rel = "stylesheet" href = "/assets/post.css">') %> <div class="content"> <% posts.forEach(function(post) { %> <% include ./post %> <% }) %> </div> Now, if we replace <% include ./post %> with the actual contents of the file, the main page will ultimately look like this: <% layout('layout') %> <% block('head').append('<link rel = "stylesheet" href = "/assets/post.css">') %> <div class="content"> <article> <header> <h1> <a href="/<%= post.link %>"><%= post.title -%></a> </h1> <h4> <% var month = post.date.toLocaleString('en-us', { month: "short" }) %> <% var date = month + ' ' + post.date.getDate() + ', ' + post.date.getFullYear() %> <time datetime="<%= date %>"> <%= date %> </time> <% if (post.readingTime) { %> : <span class="tilde"> ~</span> <%= post.readingTime %> <% } %> </h4> </header> <%- post.body %> </article> </div> <% if (totalPages > 1) { %> <% include ./partials/page-navigation %> <% } %> which is just what we need!]]> Express> Express offers a really easy routing system, which is the main selling point: var app = require('express')() app.use('/', require('./routes/routes')) This will delegate all routing to the routes.js file, inside the routes folder, at the root level. 
Here, we delegate individual routes to their specific files, but declaring routes will be just as easy: var router = require('express').Router() router.use('/privacy-policy', require('./privacy-policy')) router.use('*', function(req, res) { // req stands for request, res for response res.render('not-found') // This is the 404 page, and should be the last route }) module.exports = router I picked the privacy policy route as an example, because you can download the policy in md format, so we'll cover nested routes and serving files in one go: var router = require('express').Router() router.get('/', function(req, res) { res.render('privacy-policy') }) router.get('/download', function(req, res) { res.download('./assets/files/Privacy Policy.md', 'Roland Leth Privacy Policy.md') }) module.exports = router Because we got here from router.use('/privacy-policy'), / just means the root of the current route, which will resolve to rolandleth.com/privacy-policy, the second get resolving to rolandleth.com/privacy-policy/download, respectively. The res.download's first parameter is the file to serve, and the second parameter is the name that the resource will be saved with. Next time we'll talk about layouts.]]>.]]> Recently.]]> Having to create the same set of buttons over and over can become cumbersome. We'll try to make use of protocols and implement some default ones. Let's start with that: @objc protocol CanGoBack { // Sounds better than Backable :) func back() } extension CanGoBack where Self: UIViewController { func back() { if presentingViewController != nil, navigationController?.childViewControllers.first == self { navigationController?.dismiss(animated: true) } else { navigationController?.popViewController(animated: true) } } func setupBackButton() { guard navigationController?.childViewControllers.first != self else { return } let backButton = UIButton.myBackButton() backButton.addTarget(self, action: #selector(back), for: .touchUpInside) // ?? 
navigationItem.leftBarButtonItem = UIBarButtonItem(customView: backButton) } } But here we stumble upon the problem: back has to be marked as @objc, to be able to create the #selector, but that's not possible; @objc is to be used for members of classes, @objc protocols, and concrete extensions of classes. Sadly, there's no way to work around this without some subclassing, but the good news is it's rather straightforward: class Button: UIButton { // `action` is the internal variable of type `Selector`. private let buttonAction: () -> Void @objc private func performButtonAction() { buttonAction() } init(action: () -> Void) { buttonAction = action let image = UIImage(named: "back") super.init(frame: CGRect(origin: .zero, size: image?.size ?? .zero)) setImage(image, forState: .Normal) addTarget(self, action: #selector(performButtonAction), forControlEvents: .TouchUpInside) } } We then modify our protocol, by replacing the UIButton creation: func setupBackButton() { guard navigationController?.childViewControllers.first != self else { return } let backButton = Button(action: { [weak self] in self?.back() } ) navigationItem.leftBarButtonItem = UIBarButtonItem(customView: backButton) } And from now on, all we have to do for an UIViewController to have a back button and action is: class SecondMainViewController: UIViewController, CanGoBack { override viewDidLoad() { super.viewDidLoad() setupBackButton() } } We could go a step further and subclass UIBarButtonItem, but since the logic is exactly the same, there's no need to include it here. We all hate explicitly typed strings, especially for creating UIImages, but we can surely improve on that. Let's start with an enum, that has a var, to transform its raw value into an UIImage: enum Asset: String { case back = "leftArrow" case logo case email case briefcase // Theoretically, a bang wouldn't be that bad here, // as we should be 100% sure of what goes in this enum var image: UIImage? 
{ return UIImage(named: rawValue) } } And why not improve UIImageView too, while we're at it? extension UIImageView { convenience init(asset: Asset) { self.init(image: asset.image) } } Now, by having autocomplete, our calls are easier, and safer: ]]>]]> let backImage = Asset.back.image let emailImage = Asset.logo.image let logoImageView = UIImageView(asset: .logo) let briefcaseImageView = UIImageView(asset: .briefcase) I was reading Shannon's post about shape layers, and one of the resources he links to is this page about fill rules. It was interesting to see the inner workings of them. Go and bookmark calayer.com, as well!]]> The following snapping logic is for a collection with cells of the same size and one section, but the logic for more sections shouldn't be much different, or much more complex. scrollViewWillEndDragging has an inout targetContentOffset parameter, meaning we can read and modify the end position of the scroll. Luckily, we don't need to take into consideration insets, line or item spacing (I've lost a lot of time by including them, then not being able to understand why the correct math produces wrong results): let cellWidth = collectionView( // 1 collectionView, layout: collectionView.collectionViewLayout, sizeForItemAt: IndexPath(item: 0, section: 0) ).width let page: CGFloat let snapPoint: CGFloat = 0.3 let snapDelta: CGFloat = 1 - snapPoint let proposedPage = targetContentOffset.pointee.x / max(1, cellWidth) // 2 if floor(proposedPage + snapDelta) == floor(proposedPage) && scrollView.contentOffset.x <= targetContentOffset.pointee.x { // 3 page = floor(proposedPage) // 4 } else { page = floor(proposedPage + 1) // 5 } targetContentOffset.pointee = CGPoint( // 6 x: cellWidth * page, y: targetContentOffset.pointee.y ) First, we'll need our cell width (1) so we can calculate the "proposed page" (2): this is the "page" we're at during scrolling (for example 3.25 would mean page 3 and a quarter of the fourth). 
If our desired snapPoint is 30%, then our snapDelta would be 1 - 0.3 = 0.7. Think of it like this: if we scrolled more than 30% of a page, then we virtually reached its end and we need to scroll to the beginning of the next one: 3.3 + 0.7 = 4.0 and floor(4.0) > floor(3.0) (5); if we scrolled less than 30% of a page, then we need to stay on it by scrolling to its beginning: 3.25 + 0.7 = 3.95 and floor(3.95) == floor(3.0) (4). We also need to consider the case where the user scrolls past the last page (3) - the targetContentOffset will be within bounds, but the current contentOffset won't be, so we need to check for that as well (5). Finally, we replace the targetContentOffset with our computed value (6). Update, Feb 13, 2017: Based on the previous point of "insets don't have to be taken into account", I've lost a lot of time in a recent project, when I actually had to take them into consideration (for "true" pagination, at least), so I'm a bit lost about this. If "true" pagination is desired, as in scroll one page at a time, we need to change things a little bit: private var startingScrollingOffset = CGPoint.zero func scrollViewWillBeginDragging(_ scrollView: UIScrollView) { startingScrollingOffset = scrollView.contentOffset // 1 } func scrollViewWillEndDragging(_ scrollView: UIScrollView, withVelocity velocity: CGPoint, targetContentOffset: UnsafeMutablePointer<CGPoint>) { // [...] let offset = scrollView.contentOffset.x + scrollView.contentInset.left // 2 let proposedPage = offset / max(1, cellWidth) let snapPoint: CGFloat = 0.1 let snapDelta: CGFloat = offset > startingScrollingOffset.x ? (1 - snapPoint) : snapPoint if floor(proposedPage + snapDelta) == floor(proposedPage) { // 3 page = floor(proposedPage) // 4 } else { page = floor(proposedPage + 1) // 5 } targetContentOffset.pointee = CGPoint( x: cellWidth * page, y: targetContentOffset.pointee.y ) } We'll now need to save the position when scrolling started (1). Then, we'll use the current contentOffset and left inset instead of the targetContentOffset (2). 
The snapDelta logic changes too, as follows, based on direction: swiping left: if we scrolled more than 10% of the current page, then we virtually reached its end and we need to scroll to the next one: 3.1 + 0.9 = 4.0 and floor(4.0) > floor(3.0) (5); if we scrolled less than 10% of the current page, we need to stay on it, by scrolling back to our starting point: 3.05 + 0.9 = 3.95 and floor(3.95) == floor(3.0) (4); swiping right: if we scrolled to less than 90% of the previous page, then we virtually passed its end and we need to scroll to its beginning: 2.89 + 0.1 = 2.99 and floor(2.99) < floor(3.0) (5); if we scrolled to more than 90% of the previous page, we need to stay on the current page, by scrolling back to our starting point: 2.91 + 0.1 = 3.01 and floor(3.01) == floor(3.0) (4). As for calculating our page: we'll remove the targetContentOffset logic from the condition and use the snapDelta we just computed (3) instead of a flat value of 0.7. While the percentages were randomly picked, 0.1 feels a bit better for true pagination, while 0.3 feels better for snapped scrolling. If you have any feedback, drop by to chat @rolandleth. I think that what I do here can't really be called writing; it's more like scribbling, my posts are rather small and rare. But I try to be consistent, even if what I'm going to write about seems to not be of a big deal, because I believe that any info, no matter how big or small, might turn out to be useful to someone. On the other hand, despite all these facts, I have weeks when I have absolutely no idea what to write about; there's zero inspiration. I can't even imagine how hard it is to consistently write posts of 500, 1000 or more words. Hats off.]]> I personally find this a bit of a mouthful, especially if you have to type it a lot: if someDate.compare(otherDate) == .OrderedAscending { // Do stuff } Update, Oct 10, 2017: These are now built-in for Date. Leaving these here like they are (Swift 2), since the whole post is just about them. 
But we can have a few operators to make our lives a bit easier: func <(lhs: NSDate, rhs: NSDate) -> Bool { return lhs.compare(rhs) == .OrderedAscending } func <=(lhs: NSDate, rhs: NSDate) -> Bool { return lhs < rhs || lhs == rhs } func >(lhs: NSDate, rhs: NSDate) -> Bool { return lhs.compare(rhs) == .OrderedDescending } func >=(lhs: NSDate, rhs: NSDate) -> Bool { return lhs > rhs || lhs == rhs } func ==(lhs: NSDate, rhs: NSDate) -> Bool { return lhs.compare(rhs) == .OrderedSame } func !=(lhs: NSDate, rhs: NSDate) -> Bool { return !(lhs == rhs) } I think this is not only prettier and shorter, but a bit more expressive, as well: ]]>]]> if someDate < otherDate { // Do stuff } Postarea originală poate fi găsită aici. Unele cuvinte/exprimări traduse ori sună ciudat, ori își pierd din înțeles, așa că pentru acestea, am adăugat între paranteze termenul în varianta netradusă "(așa)". Am încercat pe cât posibil să păstrez tonul autorului. Aceasta este prezentarea pe care am ținut-o la App Builders din Elveția pe 25 Aprilie 2016. Slide-urile sunt disponibile pe SpeakerDeck. Înregistrarea video a sesiunii este disponibilă pe YouTube. Articolul a apărut în format fizic în ediția din iunie 2016 a Hacker Bits. Salut, tuturor! Sunt un programator în vârstă de 40 de ani, care a învățat pe cont propriu, iar aceasta este povestea mea. Acum câteva săptămâni am dat peste tweet-ul de mai jos, tweet care m-a făcut să reflectez asupra carierei mele, iar aceste gânduri m-au dus înapoi la momentul în care a început totul pentru mine: Mi-am început cariera de programator la ora 10 fix, într-o zi de Luni, pe data de 6 Octombrie, 1997, undeva prin Olivos, la nord de Buenos Aires, Argentina. Momentul exact a fost Unix Epoch 876142800. Îmi sărbătorisem recent ziua de naștere de 24 de ani. Lumea era ceva mai diferită pe atunci. Site-urile web nu aveau avertizări de cookies. Viitorul web-ului erau portalurile gen Excite.com. AltaVista era motorul meu preferat de căutare. 
E-mailul meu era kosmacze@sc2a.unige.ch, asta însemnând că site-ul meu personal era. Încă jeleam după Prințesa Lady Diana. Steve Jobs lua rolul de CEO și convinsese Microsoft să investească 150 de milioane de dolari în Apple Computer. Digital Equipment Corporation dădea în judecată Dell. Rămășițele lui Che Guevara tocmai ce fuseseră aduse înapoi în Cuba. Al patrulea sezon din Friends tocmai ce începuse. Gianni Versace tocmai ce fusese omorât în fața casei sale. Maica Tereza, Roy Lichtenstein și Jeanne Calment (cea mai în vârstă persoană care a existat vreodată) tocmai ce încetaseră din viață. Oamenii se jucau într-una Final Fantasy 7 pe PlayStation-ul lor. BBC 2 începuse să difuzeze Teletubbies. James Cameron era pe cale să lanseze Titanic-ul. Cei din The Verve tocmai ce lansaseră hit-ul lor “Bitter Sweet Symphony”, ca apoi să fie nevoiți să plătească majoritatea veniturilor din drepturi de autor către Rolling Stones. Telefoanele inteligente (smartphones) arătau ca Nokia 9000 Communicator: aveau 8 MB memorie, un procesor i386 de 24 MHz și rulau sistemul de operare GEOS. Ceasurile inteligente (smartwatches) arătau precum CASIO G-SHOCK DW-9100BJ. Nu aveau atât de multe aplicații, dar bateria rezista mult mai mult. IBM Deep Blue îl învinsese pentru prima dată pe Garry Kasparov într-un joc de șah. Un hacker, cunoscut sub pseudonimul de ”_eci”, publicase codul sursă scris în C pentru o exploatare a unei vulnerabilități din Windows 3.1, 95 și NT denumită “WinNuke”, cu un vector de atac de tip denial-of-service pe portul TCP 139 (NetBIOS), cauzând ceea ce a ajuns să fie cunoscut drept "Blue Screen of Death” (Ecranul albastru al morții). Întâmplător, 1997 este și anul în care s-au născut Malala Yousafzai, Chloë Grace Moretz și Kylie Jenner. 
Multe povești din filme se petrec în 1997, cum ar fi: Escape from New York, Predator 2, The Curious Case of Benjamin Button, Harry Potter and the Half-Blood Prince, The Godfather III și, conform Terminator 2: Judgement Day, Skynet devenea conștient la 2:14 pe 29 August 1997. Asta nu s-a întâmplat; în schimb, într-o interesantă întorsătură de evenimente, domeniul google.com a fost înregistrat pe 15 septembrie al aceluiași an. Eram la 2 ani distanță de Y2K, iar mass-media începuse să neliniștească oamenii. Prima mea slujbă a implicat crearea de pagini ASP în varii editoare text, de la Microsoft FrontPage, la HotMeTaL Pro sau EditPlus, administrarea compatibilității între browserele Netscape Navigator și Internet Explorer 4 și scrierea de proceduri stocate (stored procedures) cu SQL Server 6.5 care stătea la baza unui site web publicat în japoneză, rusă, engleză și spaniolă — fără niciun fel de suport de consistență UTF-8 de-a lungul întregului sistem tehnologic. Produsul rezultat din aceste eforturi rula pe un server Pentium II, găzduit undeva prin SUA, cu un hard disk uimitor de 2 GB și extraordinara memorie de 256 MB de RAM. Era un singur server ce rula Windows NT 4, SQL Server 6.5 și IIS 2.0, servind undeva la 10.000 vizitatori pe zi. Primul meu limbaj de programare profesional a fost acest mutant denumit VBScript și, desigur, un pic de JavaScript pe partea de client, presărat cu foarte multe ”dacă browser-ul este Netscape fă asta, altfel fă asta”, pentru că pe atunci nu aveam nicio idee despre cum să folosesc JavaScript într-un mod corect. Destul de interesant este că astăzi suntem în 2016 și abia începem să înțelegem cum să facem ceva în JavaScript. Nu se auzise încă despre unit tests. Agile Manifesto nu fusese scris încă. Integrarea continuă (continuous integration) era doar un vis. XML nici măcar nu era un cuvânt la modă. Strategia noastră de testare implica repornirea serverului o dată pe săptămână, pentru că altfel se strica într-un mod aleator. 
Ne creaserăm propria noastră componentă COM+ în Visual J++ pentru a analiza fișierele JPEG care se încărcau pe server. Imediat ce au început să apară fișierele codate (encoded) prin JPEG 2000, componenta noastră s-a stricat într-un mod mizerabil. În ultimele 6776 de zile mi-am băut cafeaua de dimineață, apoi am scris cod cu varii tehnologii, cum ar fi VBScript, JavaScript, Linux, SQL, HTML, Makefiles, Node.js, CSS, XML, .NET, YAML, Podfiles, JSON, Markdown, PHP, Windows, Doxygen, C#, Visual Basic, Visual Basic.NET, Java, Socket.io, Ruby, unit tests, Python, script-uri shell, C++, Objective-C, batch files, iar în ultima vreme Swift. În aceste 6776 s-au întâmplat multe, cel mai important lucru fiind faptul că m-am căsătorit. Mi-am dat demisia de la 6 slujbe și am fost dat afară de 2 ori. Am pornit și am închis propria afacere. Am obținut masteratul. Am publicat câteva proiecte cu cod sursă public (open source), iar despre unul din ele chiar Erica Sadun a scris un articol pe site-ul Ars Technica. Am apărut la emisiuni TV din Elveția și Bolivia. Am urmărit prezentări (keynotes) live cu Bill Gates sau Steve Jobs în Seattle și San Francisco. Am prezentat la și am co-organizat conferințe pe patru continente. Am scris și publicat două cărți. Am avut de două ori un colaps mental (burned out); s-au mai întâmplat și multe altele, frumoase dar și oribile. De multe ori m-am gândit să îmi schimb definitiv profesia de programator. Dar, cumva, programarea mă atrăgea înapoi după o perioada. Îmi place să creez aplicații, sisteme, software. Pentru a evita un nou colaps (burn out), a trebuit să creez varii strategii. În această prezentare vă voi împărtăși secretele mele, pentru ca și voi să puteți ajunge să fiți programatori cu experiență la măreața vârstă de 40 de ani, încă dornici să continuați această profesie. Niște sfaturi simple pentru a ajunge să fiți programatori fericiți la mareața vârstă de 40 de ani. 
Primul sfat pe care vi-l pot da tuturor este acela de a nu da atenție prea mare la ce este la modă. În fiecare an apare un nou limbaj de programare, o nouă bibliotecă, framework sau tipar, o nouă abordare arhitecturala care pune pe foc blogosfera. Oamenii o iau razna despre asta. Se țin conferințe. Se scriu cărți. Ciclurile de tip Gartner (hype cycles) apar și dispar. Consultanții cer sume enorme să instruiască, să publice, sau să distrugă viața oamenilor din aceasta industrie. Presa va susține aceste orori și vă va face să vă simțiți vinovați dacă nu le dați atenție. În 1997 au fost CORBA și RUP. În 2000 au fost SOAP & XML. În 2003 au fost Model Driven Architecture și Software Factories. În 2006 au fost Semantic Web și OLPC. În 2009 a fost Augmented Reality. În 2012 a fost Big Data. În 2015 … Realitatea Virtuala? Boți? Nu vă îngrijorați în legătură cu ce e la modă. Continuați să vă vedeți de treburile voastre, continuați să învățați ceea ce ați început să învățați și mergeți mai departe. Acordați atenție doar dacă sunteți cu adevărat interesați, sau dacă simțiți că vă va aduce vreun beneficiu pe termen mediu sau lung. Motivul pentru acest sfat vine de la faptul că, după cum au spus și romanii în trecut, nihil sub sole novum — "nimic nou sub soare". Majoritatea lucrurilor pe care le vedeți și pe care le învățați în domeniul calculatoarelor există de zeci de ani, iar acest lucru este ascuns intenționat sub munți de publicitate (marketing), cărți, blog-uri și întrebări pe Stack Overflow. Fiecare nouă arhitectură este doar o reinventare și o readaptare a unei idei care a existat de zeci de ani încoace. În industra noastră, fiecare tehnologie generează ceea ce eu numesc o ”galaxie”. Aceste galaxii conțin stele dar și găuri negre; schimbări ce dispar în noapte, multe planete, din care doar o mică parte conțin viață, mult praf cosmic și materie neagră (dark matter). Câteva exmple de galaxii: .NET, Cocoa, Node.js, PHP, Emacs, SAP, etc. 
Fiecare dintre acestea conține evangheliști, programator, bloggeri, podcast-uri, conferințe, cărți, cursuri, servicii de consultanță și probleme de incluziune socială. Galaxiile sunt create sub premisa că tehnologia lor este răspunsul la toate problemele. Fiecare galaxie este, deci, bazată pe o ipoteză greșită. Programatorii din aceste varii galaxii încorporează atitudinile arhetipale care au adus respectiva tehnologie la viață. Aderă la ideile acesteia, vor purta tricouri cu entuziasm și îi vor evangheliza pe toți ceilalți cu privire la meritele alegerilor lor. De fapt, folosesc termenul de galxie pentru a nu folosi termenul ceva mai potrivit, dacă nu controversat, de ”religie”. În cazul meu, am petrecut primii zece ani ai carierei mele în galaxia Microsoft, iar pe următorii 9 în galaxia Apple. Îndrăznesc să precizez că unul dintre motivele cele mai mari pentru care am schimbat galaxia a fost Steve Ballmer. Mă săturasem de atitudinea oamenilor din galaxia Microsoft față de codul sursă public (open source). Pe de cealaltă parte, trebuie să precizez că galaxia Apple este un loc deosebit de plăcut, plin de artiști și muzicieni și scriitori, care, pur întâmplător sau din neșansă, creează și software. Am participat la conferințe din galaxia Microsoft, cum ar fi Barcelona TechEd din 2003, sau la varii conferințe Tech Talks în Buenos Aires, Geneva sau Londra. Chiar am și prezentat la Microsoft DevDays 2006 din Geneva. Atitudinea generală a programatorilor din galaxia Microsoft este una neprietenoasă, ”corporatistă” și secretoasă, cu NDA-uri și procese IT complicate. Galaxia Apple a fost pentru mine, în 2006, total opusul; era plină de muzicieni, artiști, pictori; creau software pentru a-și susține pasiunea și creau software cu pasiune. Asta a reprezentat o diferență enormă, iar de atunci și până în prezent, această galaxie încă îmi aduce plăcere enormă, galaxia în care ne aflăm, chiar acum, cea care ne-a unit pe toți. 
Apoi a fost lansat iPhone-ul, iar restul este istorie. Așa că recomandarea mea către voi este următoarea: alegeți-vă galaxia cu înțelepciune, bucurați-vă de ea, mult sau puțin, dar țineți-vă telescopul îndreptat către celelalte galaxii și pregătiți-vă să faceți un salt rapid (hyperjump) către alte locuri, dacă este nevoie. Asta mă aduce la următorul punct: aflați și învățați despre cum a ajuns să existe tehnologia voastră preferată. Vă place C#? Știți cine este creatorul? Cum a apărut proiectul .NET? Cine a fost arhitectul principal (lead architect)? Care au fost constrângerile proiectului și cum a ajuns limbajul să fie ce este astăzi? Aplicați același principiu față de orice limbaj sau arhitectură CPU care vă place, sau pe care o iubiți: Python, Ruby, Java, oricare ar fi limbajul de programare; învățați despre originile lui; cum a ajuns să existe. La fel și despre sistemele de operare, tehnologiile de rețea, hardware, orice. Învățați despre cum au ajuns oamenii respectivi să aibă ideile respective și cât a durat pentru ca ele să crească și să se maturizeze. Pentru că un software bun durează 10 ani să fie finalizat. Poveștile din jurul originilor industriei noastre sunt fascinante și vă vor demonstra două lucruri: primul, că totul este o readaptare (remix). Doi, că chiar voi ați putea fi cei care readaptează următoarea mare creație. Nu, rectific, voi veți fi creatorii ei. Și, pentru a vă ajuta să realizați acest lucru, aici este selecția mea (foarte subiectivă) de cărți de istorie care îmi plac și pe care le recomand: Printre altele, veți învăța să puneți preț pe lucrurile care au rezistat testului timpului: Lisp, TeX, Unix, bash, C, Cocoa, Emacs, Vim, Python, ARM, GNU make, man pages. Acestea sunt doar niște exemple de lucruri trainice și folositoare, care merită sărbătorite, prețuite și de la care se poate învăța câte ceva. Învățați. Orice. Vreți să învățați Fortran? Sigur. Vi se pare Erlang interesant? Excelent. 
Credeți că COBOL ar putea fi următoarea mare realizare din cariera voastră? Fantastic. Trebuie să știți mai multe despre Functional Reactive Programming? Sunteți invitații mei. Design? Desigur. UX? Trebuie. Poezie? Ar trebui. Multe concepte de bază ale Informaticii există de zeci de ani, ceea ce înseamnă că merită să învățați limbaje de programare vechi; chiar și cele ”misterioase”. În primul rând, vă va face să apreciați stadiul curent al industriei (sau să-l disprețuiți, depinde), iar în al doilea, veți învăța cum să folosiți uneltele actuale într-un mod mai eficient — măcar datorită faptului că veți înțelege trecutul și istoria lor. Sfatul 1: învățați măcar un nou limbaj de programare în fiecare an. Nu eu am fost cel care a venit cu această idee, ci cartea The Pragmatic Programmer. Și funcționează. Un nou limbaj de programare în fiecare an. Simplu, nu? Mergeți mai departe decât pasul obișnuit ”Hello, World” și creați ceva folositor cu el. Eu de obicei creez un calculator simplu cu orice noua tehnologie pe care o învăț. Mă ajută să descopar sintaxa, mă familiarizează cu API-urile sau editorul (IDE), etc. Sfatul 2: citiți cel puțin 6 cărți anual. V-am prezentat mai sus o listă cu 6 cărți care trebuie citite, care ar trebui să vă țină ocupați pentru un an. Urmează lista pentru al doilea an: (OK, sunt șapte.) Șase cărți anual pare mult, dar înseamnă doar o carte la fiecare 2 luni, iar majoritatea cărților pe care le-am prezentat nu sunt atât de lungi, și, nu doar atât, sunt extrem de bine scrise, sunt amuzante și sunt pline de învățături. Haideți să o privim altfel: dacă acum aveți 20 de ani, la 30 veți fi citit peste 60 de cărți și peste 120 când veți fi de vârsta mea. Și vă veți fi jucat cu 20 de limbaje de programare. Doar gândiți-vă un pic la asta. Câteva din cărțile pe care le-am selectat pentru voi au fost scrise în anii ’70, unele în ’80, altele în '90, iar majoritatea în ultimii zece ani. 
Reprezintă cele mai bune materiale scrise peste care am dat în industria noastră. Dar să nu le citiți și atât; luați notițe. Puneți-vă semne de carte. Scrieți pe paginile cărților. Apoi recitiți-le din când în când. Borges obișnuia să spună că una din plăcerile mai mari decât a citi o carte este de a o reciti. Și, vă rog, cumpărați în format fizic cărțile care vă plac cu adevărat. Credeți-mă. Cărțile digitale sunt supraestimate. Nimic nu se compară cu cartea fizică. De asemenea, vă rog să fiți conștienți că pe măsura ce înaintați în vârstă, numărul de lucruri care vor mai putea fi considerate noi și/sau importante va scădea semnificativ. Pregătiți-vă pentru asta. Va fi în regulă să vărsați o lacrima în tăcere când veți realiza acest lucru. Odată ce ați învățat, învățați și pe alții (teach). Este foarte important. Asta nu înseamnă să pregătiți o clasă și să chemați oameni să vă asculte vorbind (deși ar fi extraordinar dacă ați face asta!). Ar putea însemna să oferiți răspunsuri semnificative întrebărilor de pe Stack Overflow; să scrieți o carte; să publicați un podcast despre tehnologia voastră favorită; să mențineți un blog; să scrieți pe Medium; să vizitați un alt continent pentru a organiza cursuri de programare folosind Raspberry Pi; sau să ajutați un programator mai tânăr devenindu-i mentor (dar nu faceți asta înainte de 30 de ani). Învățând și pe alții veți deveni mai modești, pentru că veți realiza într-un mod dureros cât de limitate sunt cunoștințele voastre. A învăța și pe alții este cel mai bun mod de a învăța voi. Doar prin testarea cunoștințelor cu ajutorul altora veți reuși să învățați cu adevarat. Asta vă va face să aveți mai mult respect pentru alți programatori și alte tehnologii; fiecare limbaj, irelevant cât de modest sau misterios, își are locul în Tao of Programming și numai învățând și pe alții veți reuși să o simțiți. Învățând și pe alții veți reuși să faceți o diferență în lumea asta. 
În 2012 am primit o scrisoare de la o persoană care a participat la unul din cursurile mele. Era programatoare de Adobe Flash. Țineți minte ActionScript și toate cele? Nesurprinzător, după 12 ani de lucru ca programator freelancer de Flash, s-a trezit fără loc de muncă. Singură. Cu un bebeluș de hrănit. Mi-a spus că i-a plăcut cursul, dar și că a învățat ceva folositor și că a reușit să-și găsească o slujbă de programator de mobile web. Îmi scrisese pentru a-mi mulțumi. Nu pot spune că am schimbat lumea, dar probabil că am transformat-o puțin, în ceva mai bun (sper). Acest gând a făcut ca toate lecțiile pe care le-am dat din acel moment să fie mai semnificative și care să merite mai mult. Nu vă așteptați de la corporațiile software să vă ofere nici cea mai mică urmă de carieră. Poate fac asta în SUA, dar în Europa n-am întâlnit asta niciodată. Asta înseamnă că sunteți singurii responsabili pentru succesul carierei voastre. Nimeni nu vă va spune ”păi, la anul poți crește să ajungi lider de echipă (team lead), apoi manager, apoi CTO…”. Nici. Pe. Departe. Dimpotrivă, fix opusul este valabil; ați fost, sunteți și veți fi programatori, adică niște muncitori de fabrică scumpi, ai căror manageri ar fi foarte fericiți să vă externalizeze sarcinile, indiferent de ce v-ar spune. Nu acceptați o slujbă doar pentru bani. Companiile de software au devenit ateliere (sweatshops), unde trebuie să justificați salariul vostru absurd de mare cu un program nebun și așteptări nerezonabile. Și, cel puțin în cazul Elveției, nici nu există vreun sindicat care să vă ajute în caz că lucrurile iau o întorsură negativă. De fapt, există sindicate în Elveția, doar că nu sunt interesate de situații care nu le aduc câtuși de puțină expunere media. Și mai rău; în majoritatea locurilor de muncă veți fi hărțuiți, în mod special dacă sunteți femeie, membru al comunității LGBT, sau dintr-un grup de etnie non-caucaziană. Am fost martorul hărțuirii colegilor mei, femei și homosexuali. 
Unele părți ale industriei noastre sunt de-a dreptul dezgustătoare și nu trebuie să vă aflați în Silicon Valley pentru a trece prin asta. Nu aveți nevoie de Medium să citiți despre asta. Ați putea să o simțiți pe pielea voastră chiar aici, în Elveția. Multe bănci au locuri de muncă groaznice. Instituțiile financiare vă pun să vomitați cod timp de 15 ore pe zi, chiar dacă legislația elvețiană interzice acest lucru în mod explicit. Companiile farmaceutice vă pun să scrieți cod pentru a înșela teste și pentru a-i ajuta să evite regulamentul. Startup-urile vor până și pielea de pe voi, să lucrați 18 ore pe zi fără vreo remunerație, spunându-vă porcării de genul ”pentru că vă dăm opțiuni de acțiuni (stock options)” sau ”pentru că toți avem spirit de echipă”. Nu contează că sunteți Zach Holman și că puteți scrie în CV-ul vostru că ați creat GitHub de la zero: veți fi concediat pentru cele mai neînsemnate motive. Nu contează că aplicația generează mai mult de jumătate din traficul și venitul angajatorului vostru; echipa de API-uri vă va trata și pe voi, și ideile voastre, cu dispreț și superficialitate. Am fost întrebat dacă pot lucra pe gratis de oameni foarte cunoscuți în industrie, unii dintre ei chiar recomandați (featured) pe Wikipedia, lucru care mi se pare cu adevărat groaznic. Nu le voi împărtăși numele, dar voi împiedica orice junior să se apropie de ei, pentru că oamenii care lucrează fără etică nu merită creierul nimănui. De fiecare dată când un manager de Resurse Umane (HR) vă spune ”trebuie să faci asta (orice lucru care vi se pare greșit în raport cu scara voastră a valorilor) pentru că te plătim”, nu uitați să răspundeți: ”voi mă plătiți, dar eu vă dau la schimb creierul meu, iar eu refuz să respect acest ordin”. Și, cireașa de pe tort, vă vor poziționa într-un spațiu deschis (open space) și vor fi și mândri de asta, numai ei știu de ce. Spațiile deschise sunt un cancer. 
Sunt, fără cea mai mică urmă de îndoială, cel mai prost mod inventat vreodată de a aranja un spațiu și cel mai puțin adecvat pentru dezvoltarea de software — sau orice tip de muncă creativă. Nu uitați: faptul că dacă înțelegeți ceva nu înseamnă automat că trebuie să și fiți de acord. Nu vă supuneți autorității. Spuneți “la dracu', nu voi face ce-mi spui” și schimbați locul de muncă. Există locuri de muncă fantastice în lumea asta; nu multe, dar există. Am avut norocul să lucrez la câteva dintre ele. Nu lăsați o slujbă proastă să vă distrugă entuziasmul. Nu merită. Nu vă supuneți și mergeți mai departe. Sau, și mai bine, deveniți independenți. Probabil că ați auzit despre mitul ”Inginerulului Software 10x”, nu? Păi, uitați cum stau lucrurile: nu este un mit, dar nici nu funcționează cum credeți voi că funcționează. Funcționează, de fapt, din perspectiva angajatorului: un ”Inginer Software 10x” generează de 10 ori mai multă valoare decât suma plătită de angajator. Asta înseamnă că tu, sau ea, sau el primesc 100.000 CHF pe an, dar generează de fapt o valoare de peste 1 milion de franci. Și, desigur, ei primesc primele la sfârșit de an fiscal, pentru că, știți voi, capitalism. Am spus (Enough said). Fiți în continuă mișcare; fiți ca un rechin care înoată încontinuu, pentru că aptitudinile voastre sunt extrem de valoroase. Vorbiți despre salariul vostru, menționați-l cu voce tare, scrieți pe blog despre el, astfel încât colegii voștri să știe cât valorează munca lor. Companiile vor să vă țineți gurile închise în privința asta, pentru ca femeile să fie plătite 70% din ce sunt bărbații plătiți. Așa că vorbiți despre asta! Scrieți pe blog despre asta! Scrieți pe Twitter! Eu câștig 135.000 CHF pe an. Acesta este salariul meu actual. Dar tu? Și tu? Cu cât vorbim mai mult despre asta, cu atât va fi mai puțină inegalitate. 
Orice persoană care prestează muncă mea, cu experiența mea, ar trebui să câștige aceeași sumă ca mine, indiferent de rasă, sex, vârstă, sau echipă de fotbal preferată. Punct. Dar nu este așa. Nu este. Dacă sunteți bărbați albi, amintiți-vă de toate privilegiile de care v-ați bucurat încă din naștere, doar pentru simplul fapt că v-ați născut așa. Este responsabilitatea voastră să schimbați industria și prejudecățile sale cu privire la incluziunea socială. Este de datoria voastră să îi ajutați pe ceilalți. Luați decizii în mod conștient în viață. Fiți conștienți de acțiunile voastre și de urmările lor. Nu roșiți și nu vă fie rușine atunci când vă schimbați parerea. Spuneți ”îmi pare rău” când e nevoie. Ascultați. Nu fiți vreun șmecher. Aveți integritate și respect de sine. Nu criticați și nu luați în derâdere alegerile tehnologice ale colegilor voștri, pentru că fiecare are motivele proprii pentru alegerile făcute, iar acestea trebuie respectate. Fiți pregătiți să vă schimbați opinia în orice moment, prin învățare. Într-o zi poate vă va plăcea Windows. Într-o zi poate vă va plăcea Android. Mie, în ultima vreme, chiar au început să-mi placă unele părți din Android. Iar asta este în regulă. Toată lumea e în extaz despre Swift, dar, în realitate, la ce sunt eu mai atent zilele astea, este LLVM în sine. Cred că LLVM este cel mai important proiect software de astăzi, având în vedere impactul său pe termen lung. Objective-C blocks, Rust & Swift (cele mai îndrăgite două limbaje de programare compilate și de tip ”strongly typed” conform studiului efectuat în 2016 de StackOverflow), Dropbox Pyston, Clang Static Analyser-ul, ARC, Google Souper, Emscripten, LLVMSharp, Microsoft LLILC, Rubymotion, cheerp, aplicații pentru watchOS, Android NDK, Metal, toate acestea au apărut sau au fost posibile datorită LLVM. Există compilatoare care folosesc LLVM ca backend pentru cam toate limbajele de programare importante din ziua de azi. 
.NET CLR va interopera cu el, iar Mono deja se folosește de el. Facebook a încercat să integreze LLVM cu HHVM, iar WebKit a trecut de la LLVM la noul compilator B3 JIT JavaScript. LLVM este multi-platformă (cross-platform), multi-arhitectural de procesor (cross-CPU-architecture), multi-limbaj (cross-language), multi-compilator (cross-compiler), multi-testat, gratis și liber ca păsările (free as in gratis and free as a bird). Învățați tot ce puteți despre LLVM. Aceasta este galaxia unde se petrece adevărata inovație. Aceasta este fundația următorilor 20 de ani. Am intuit că .NET va fi ceva important când am urmărit introducerea sa în iunie 2000. Am intuit că iPhone-ul va fi ceva important când am urmărit introducerea sa în 2007. În ambele cazuri, lumea mi-a râs, literalmente, în față. În ambele cazuri, mi-am urmat intuiția și aș putea spune că toate s-au terminat cu bine. Urmați-vă intuiția. Ați putea fi și voi norocoși. API-uri grozave permit crearea de aplicații grozave. Dacă API-urile sunt nașpa, aplicația va fi și ea nașpa, indiferent cât de frumos arată. Tineți minte că stabil (chunky, solid) este mai bine decât ”vorbă multă” (chatty) și că aplicațiile client ar trebui să fie cât mai ”proaste”; puneți cât mai multă logică în seama API-urilor. Nu inventați propriile voastre protocoale de securitate. Învățați câteva tehnologii de server (server-side) și asigurați-vă că Node este una dintre ele. Lăsați REST de-o parte și îmbrățișați Socket.io, ZeroMQ, RabbitMq, Erlang, XMPP; ca și următor pas în evoluția de dezvoltare al aplicațiilor, explorați conceptul de în-timp-real (realtime). Realtime nu este doar pentru aplicațiile de chat. Scoateți interogările (polling) din ecuație definitiv. Ah, și începeți să dezvoltați boți în jurul acestor API-uri. Zic și eu. Mai simplu este mai bine. Întotdeauna. Nu uitați de principiul KISS. Și nu mă refer doar la nivel de interfață, ci la tot, până la cele mai adânci nivele ale codului vostru. 
Refactorizare, unit tests, code reviews, pull requests, toate aceste unelte sunt la dispoziția voastră pentru a vă asigura că acel cod pe care îl livrați este cea mai simplă arhitectură care funcționează. În acest mod veți reuși să construiți sisteme versatile și rezistente pe termen lung. Cel mai important lucru de ținut minte este că vârsta nu contează. Unul dintre copii mi-a spus ”Este imposibil, tată. Matematicienii depun cea mai bună muncă înainte să împlinească 40 de ani. Iar tu ai peste 80. Este imposibil să mai ai vreo idee bună acum”. Dacă la 80 de ani ești încă treaz și alert din punct de vedere mental, ai avantajul faptului că ai trăit o perioadă lungă de timp, ai văzut multe și ți-ai dezvoltat o perspectivă. Acum am 86 de ani, iar aceste idei le-am avut în ultimii ani. Apar noi idei, iar tu alegi bucăți de ici, de colo, iar timpul potrivit este acum, pe câtă vreme se poate să nu fi fost potrivit acum 5 sau 10 ani. Michael Atiyah, matematician și câștigător al Medaliei Fields și al Premiului Abel, citat într-un articol al Wired. Atâtă vreme cât inima vă va spune să continuați să programați și să construiți lucruri noi, veți fi mereu tineri. În 2035, peste 19 ani, la o conferință software similară celei de astăzi, cineva își va începe prezentarea așa: ”Bună ziua. Am 42 de ani, iar aceasta este povestea mea”. Sper că unul din voi va ține acea prezentare; altfel, va fi un bot AI. 
Veți prezenta câteva evenimente anecdotice din 2016, spre exemplu că a fost anul în care s-au stins din viață David Bowie, Umberto Eco, Gato Barbieri și Johan Cruyff, sau când SQL Server a devenit disponibil pe Linux, sau când Google AlphaGo a învins un campion de Go, sau când s-au scurs în aceeași zi informații despre Documentele Panama (Panama Papers) și despre Baza de Date Turcă cu Cetățenii (Turkish Citizenship Database), sau când Google a luat în considerare pentru prima dată Swift pentru Android, sau ultimul an în care oamenii s-au bucurat de acest lucru nesemnificativ numit intimitate. Ne vom afla la trei ani distanță de Problema Anului 2038, iar oamenii vor fi foarte agitați în privința asta. Cu siguranță nu știu ce se va întâmpla peste 19 ani, dar vă pot spune 3 lucruri care se vor întâmpla cu siguranță: Și poate vă veți reaminti această prezentare cu zâmbetul pe buze. Vă mulțumesc mult pentru atenție.]]> I think adding everywhere a deinit method, with a deinit { print("Object X has been deinitialized.") } This way, if you expect object x to deinit at some point but it doesn't, you at least know you need to start searching, and where. Now, for the improved printing, to make this a bit cleaner and easier: func customPrint<T>( _ object: T, _ function: String = #function, // 1 _ file: String = #file, // 2 _ line: UInt = #line) { // 3 #if DEBUG // 4 let filename = URL(string: file)? .lastPathComponent .replacingOccurrences(of: ".swift", with: "") ?? 
"" if object is EmptyPrintFlagger { // 5 print("-- \(filename).\(function) [\(line)]") // 6 } else { print("-- \(filename).\(function) [\(line)] - \(object)") // 7 } #endif } func customPrint( // 8 _ function: String = #function, _ file: String = #file, _ line: UInt = #line) { customPrint(EmptyPrintFlagger(), function, file, line) // 9 } fileprivate struct EmptyPrintFlagger { } // 10 First, a few output examples, then we'll break down everything: // ObjectX.swift class ObjectX { deinit { customPrint() // 11 => ObjectX.deinit [60] customPrint("Example") // => ObjectX.deinit [61] - Example let i: Int? = nil let j: Int? = 5 customPrint(i) // => ObjectX.deinit [66] - nil customPrint(j) // => ObjectX.deinit [67] - Optional(5) } } First, the default parameters: they are built-in keywords that return the current function (1), file (2) and line number (3). You can pass in any String, but the purpose of these is to print the current location of customPrint. Next, we make sure no printing happens in production (4), because it (slightly) slows down the app, and clutters the system console with our logs. Finally, we will do a bit of extra work now, so we can be lazy at later times: we create a dummy, fileprivate struct (10), that acts as a flag inside our main customPrint (5). This way we are able to call customPrint without any parameters (8, 11), but still get the correct location (5, 6, 9, 10). Sadly, I didn't find any better way to differentiate between customPrint() and customPrint(x). While it wouldn't have been hard to just call customPrint(""), I hated the dangling - (7 with an empty String), and I'm also that lazy sometimes ¯\_(ツ)_/¯. Usually an app has fonts with well defined purposes. So why not let enums make our lives easier, a little bit? 
First, a couple of them, to define our font families and weights: struct Font { private enum Family: String { case avenirNext case proximaNova } private enum Weight: String { case regular case medium case demiBold case bold } } Then a method to easily create fonts: private static func baseFont(family: Family, size: CGFloat, weight: Weight = .regular, italic: Bool = false) -> UIFont { let font = family.rawValue let modifier = weight.rawValue + (italic ? "Italic" : "") return UIFont(name: "\(font)-\(modifier)", size: size)! } Finally, the properties: static let barButton = Font.baseFont(family: .avenirNext, size: 16, weight: .medium) static let header = Font.baseFont(family: .proximaNova, size: 18, weight: .demiBold) If the app has only one font family, everything becomes even simpler, by removing Family and the related params. Let's say we have a custom UILabel, which in turn has several types; maybe a StatusLabel that can be of type sold out and expired. The label would have several common properties, but each type would have something specific. How can we go about this? class StatusLabel: UILabel { enum StatusType { case soldOut case expired } init(type: StatusType) { super.init(frame: .zero) font = UIFont.common layer.cornerRadius = 4 textAlignment = .center translatesAutoresizingMaskIntoConstraints = false } } We now covered the common parts, and we could go ahead and set the backgroundColor and textColor like this: switch type { case .soldOut: tag = ViewTag.soldOutLabel.rawValue backgroundColor = .black textColor = .white case .expired: tag = ViewTag.expiredLabel.rawValue backgroundColor = .white textColor = .red } Nothing too ugly so far, but it's already clear that if we'll need to add more cases in the future, the amount of code will drastically increase. 
Instead, we can make use of tuples and closures: (tag, backgroundColor, textColor) = { () -> (Int, UIColor, UIColor) in switch type { case .soldOut: return (ViewTag.soldOutLabel.rawValue, .black, .white) case .expired: return (ViewTag.expiredLabel.rawValue, .white, .red) } }() A bit more compact, and easier to scan columns for the differences. On the other hand, it's also clear that if there are too many different properties between different types, or if 4/5 types have one value for a property, but the 5th is different, this approach stops being as elegant or feasible.]]> Instead of checking if size.width > size.height, we can have three handy CGSize extensions: extension CGSize { var isCompact: Bool { return height > width + delta } var isWide: Bool { return width > height + delta } var isSquare: Bool { return abs(width - height) < delta } } For usage within viewWillTransition(to:with:) I don't think the delta will be really needed, but if we want to use these properties for our own custom views, it might come in handy. Modify its value to fit your own needs, of course. I.]]> I'm pretty sure this won't suit all cases, but, usually, a label / button should highly resist being vertically shrunk more than its intrinsic size. On the other hand, we won't always mind if it grows larger than its intrinsic size, but we'd like to avoid it, if possible. I, personally, find this a bit of a mouthful: label.setContentCompressionResistancePriority(.required, forAxis: .vertical) label.setContentHuggingPriority(.defaultHigh, forAxis: .vertical) So, let's extract them into a method, with default values as added bonus. We'll also use an enum, so we can have "intermediate" values as cases: enum Priority: UILayoutPriority.RawValue { // Float case required = 1000 // .required case aboveHigh = 751 case high = 750 // .defaultHigh case low = 250 // .defaultLow case fitting = 50 // .fittingSizeLevel case none = 1 // For the cases where we don't care, since 0 is not a valid value. 
// Convert it back to a UILayoutPriority. var layoutPriority: UILayoutPriority { return UILayoutPriority(rawValue: rawValue) } } func setVerticalContentPriorities(compression compression: Priority = .required, hugging: Priority = .high) { setContentCompressionResistancePriority(compression.layoutPriority, for: .vertical) setContentHuggingPriority(hugging.layoutPriority, for: .Vertical) } For hugging, defaultLow is the default, while for compression it's defaultHigh, so we're upping them one level. Now it's much simpler to use (and easy to extrapolate to .horizontal): ]]>]]> label1.setVerticalContentPriorities() label2.setVerticalContentPriorities(hugging: .required) I. There's this handy feature, Debug -> View Debugging -> Show View Frames, which, if turned on, draws borders around views. The sad part is that it only works on the simulator. But we can (somewhat) easily simulate its behavior. First, we create a helper method that does the coloring: func activateFramesDebug() { guard debugFrames else { return } layer.borderWidth = 1 layer.borderColor = UIColor( hue: CGFloat(arc4random_uniform(100_000)) / 100_000, saturation: CGFloat(arc4random_uniform(100_000)) / 100_000, brightness: 0.5 + CGFloat(arc4random_uniform(50)) / 100, alpha: 1.0).CGColor } Then, we override drawRect / layoutSubviews, depending on the extension: // Easy to avoid production slips, easy way to turn on / off. #if DEBUG && false let debugFrames = true extension UILabel { // UIImageView, etc... override public func layoutSubviews() { super.layoutSubviews() activateFramesDebug() } extension UIButton { // UITableViewCell, UICollectionViewCell, etc... override public func draw(_ rect: CGRect) { super.draw(rect) activateFramesDebug() } } #else let debugFrames = false #endif It will not always work perfectly, it's not a perfect solution, but it's better than nothing. I'm also hoping there's an easier / better way to do this, but I don't know where we could hook up in an UIView extension. 
Let me know @rolandleth if you find a better solution. We can already use --graph and --decorate to get a pretty, colored commit tree, but we can create a function for less typing, more flexibility and more info: git_branch() { git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/' } # Short for git decorated log gdl() { branches='' if [ -n "$1" ] && [ -n "$2" ]; then branches=$1..$2 elif [ -n "$1" ]; then if [ $1 == "keyword" ]; then branches="$(git_branch)" else branches=$1.."$(git_branch)" fi else branches=master.."$(git_branch)" fi git log --oneline --graph --decorate $branches echo "${blue}Difference: ${green}$(git rev-list --count $branches) commits." } This will also print the number of commits between the two branches at the end, and the rules are as follows: keyword(make sure it's something unique), or master) and the current branch. The color helpers can be found here.]]> One usual approach is to create an enum, so your tags are more expressive by having a name: enum ViewTag: Int { case none case titleLabel case loginButton } Then tagging and retrieving views by tag will be safer, and easier to remember: let container = UIView() let titleLabel = UILabel() container.addSubview(titleLabel) titleLabel.tag = ViewTag.titleLabel.rawValue let loginButton = UIButton(type: .Custom) container.addSubview(loginButton) loginButton.tag = ViewTag.loginButton.rawValue if let titleLabel = container.viewWithTag(ViewTag.titleLabel.rawValue) as? UILabel, let loginButton = container.viewWithTag(ViewTag.loginButton.rawValue) as? UIButton { // Do stuff. } This is already much better than triple checking that the titleLabel's tag really is 1 and not 2, but what if we can go a step further and improve this, by extending UIView? extension UIView { func tag(with value: ViewTag) { tag = value.rawValue } func viewWithTag(_ tag: ViewTag) -> UIView? 
{ return viewWithTag(tag.rawValue) } // Speaking of Swifty, we could also change the above in: func view(tagged tag: ViewTag) -> UIView? { return viewWithTag(tag.rawValue) } } So all of our code can be shorter, cleaner and prettier: ]]>]]> let titleLabel = UILabel() view.addSubview(titleLabel) titleLabel.tag(with: .titleLabel) let loginButton = UIButton(type: .Custom) view.addSubview(loginButton) loginButton.tag(with: .loginLabel) [...] // somewhere else if let titleLabel = view.viewWithTag(.titleLabel) as? UILabel, let loginButton = view.view(tagged: .loginButton) as? UIButton { // Do stuff. } The standard approach for this would be something like this: struct Theme { enum Color { case title case subtitle // [...] var color: UIColor { switch self { case .title: return UIColor.red case .subtitle: return UIColor.blue ... } } } enum Font { } } And using it throughout the app would look like this: func customLabel(text: String, color uiColor: Theme.Color, font uiFont: Theme.Font) { [...] label.textColor = color.uiColor label.font = font.uiFont [...] } let myLabel = customLabel( text: "Pretty label", textColor: .title, textFont: .title ) myLabel.textColor = Theme.Color.subtitle.color myLabel.font = Theme.Font.subtitle.font Another approach would be with nested structs and static constants for avoiding the color / font computed property (at the cost of slight, but unnecessary extra memory usage): struct Theme { struct Color { static let title = UIColor.red static let subtitle = UIColor.blue ... } } I find this approach pretty neat, but I'm not really sold vs simple extensions: extension UIColor { static var title: UIColor { return UIColor.red } static var subtitle: UIColor { return UIColor.blue } ... } extension UIFont { ... } And the usage would be the one we're all already familiar with, which involves a lot less typing, too: ]]>]]> func customLabel(text: String, color: UIColor, font: UIFont) { [...] label.textColor = color label.font = font [...] 
} let myLabel = customLabel( text: "Pretty label", textColor: .titleColor, // This is neatly inferred. textFont: .titleFont ) myLabel.textColor = .subtitle myLabel.font = .subtitle I!==]]>. First, we need to ask for the user's permission to access the folder, but for that, we need an NSSavePanel: let op = NSSavePanel() op.message = "Descriptive message here" op.canCreateDirectories = false op.canChooseDirectories = false op.showsHiddenFiles = false op.prompt = "Allow" op.title = "Allow access" op.isExtensionHidden = true op.directoryURL = URL(string: "/path/to/folder") // Depending on your purpose, you might need these to true op.allowsMultipleSelection = false op.canChooseFiles = false We then check if the panel's URL matches the one we need, and save a secure scoped bookmark in user defaults, for example: guard op.runModal() == .OK && openPanel.URL == requiredURL else { return } guard let bookmarkData = try? requiredURL.bookmarkData( options: [.withSecurityScope], includingResourceValuesForKeys: nil, relativeTo: nil), bookmarkData != nil else { return } UserDefaults.standard.set(bookmarkData, forKey: requiredURLKey) From now on, when we need access to this location, we can just resolve the data saved back into an NSURL, and use it; we just need to check if the bookmark staled in the meantime: ]]>]]> var staleBookmark = false guard let bookmarkData = UserDefaults.standard.data(forKey: requiredURLKey), let url = try? URL(resolvingBookmarkData: bookmarkData, options: [.withSecurityScope], relativeTo: nil, bookmarkDataIsStale: &staleBookmark) else { return } guard !staleBookmark else { return } // [...] Use the URL. Say you have a Mac app and you want to run a script, either to perform some action, or to return something, here's one way to do it. 
First, we create an NSTask, set the launch path of the handler, in our case ruby (we will use the default one, to be sure it exists), and the required parameters, if required: let task = Process() task.launchPath = "/usr/bin/ruby" task.arguments = [ Bundle.main.path(forResource: myScript, ofType: "rb")!, parameter1 ] This would be the same as running in the terminal: ruby /path/to/myScript parameter1 All good, but what if the script returns something and we want to use that? NSPipe to the rescue: let pipe = Pipe() task.standardOutput = pipe task.launch() let resultData = pipe.fileHandleForReading.readDataToEndOfFile() let resultString = String(data: resultData, encoding: .utf8) Returning a value with a script is as easy as printing: def optional_method 'I am the return value' end puts optional_method + ' of a script' This would result in resultString's value to be I am the return value of a script. Been!]]> This week, Christmas arrived early for developers. Swift is now open source, it has a package manager, there is a dedicated evolution / roadmap repo for it (the changes for 3.0 look really nice) and Apple already accepted a lot of pull requests. Can't wait to see what will happen with Swift in the next year or so, but I do know its future looks bright and it looks like others feel the same, even more so than me.]]> I recently saw Joe's post about updating Xcode plug-ins, but since I'm really lazy, updating them one by one didn't suffice, so here's my take on it. In case you want to skip straight to the desert, here's the gist. First, we change our script's directory: # You might need these. 
# require 'pathname' # require 'fileutils' # In case we want to also print the already updated plug-ins DISPLAY_ALREADY_UPDATED = false plugin_path = File.expand_path('~/Library/Application Support/Developer/Shared/Xcode/Plug-ins') Dir.chdir(plugin_path) files = Dir['*.xcplugin'] Then we iterate through the files: files.each do |plugin| # Lazy, lazy :) xcode_uuid_key = 'DVTPlugInCompatibilityUUID' plugin_uuids_key = xcode_uuid_key + 's' # This path needs to escape the backslash, so we can't reuse the one above :( plugin_plist = "~/Library/Application\\ Support/Developer/Shared/Xcode/Plug-ins/#{plugin}/Contents/Info.plist" plugin_uuids = `defaults read #{plugin_plist} #{plugin_uuids_key}` latest_xcode_uuid = `defaults read /Applications/Xcode.app/Contents/Info #{xcode_uuid_key}`.gsub! "\n", '' # If the value is already there, skip and optionally notify if plugin_uuids.include? latest_xcode_uuid uptodate_plugins += 1 puts "#{green}#{plugin} is already up to date." if DISPLAY_ALREADY_UPDATED next end puts "#{yellow}Updating #{plugin}..." system "defaults write #{plugin_plist} #{plugin_uuids_key} -array-add #{latest_xcode_uuid}" updated_plugins += 1 end Then, at the end, the finishing touches: # For a bit of flavor red = "\e[31m" green = "\e[32m" yellow = "\e[33m" white = "\e[0m" if updated_plugins == 0 if uptodate_plugins == 1 puts "\n#{green}You have only one pluging, and it was already up to date.#{white}" else puts "\n#{green}All #{uptodate_plugins} plugins were already up to date.#{white}" end elsif uptodate_plugins == 0 if updated_plugins == 1 puts "\n#{green}You have only one plugin, and it has been updated." else puts "\n#{green}All #{updated_plugins} plugins have been updated." 
end elsif updated_plugins == 1 && uptodate_plugins == 1 puts "\n#{green}#{updated_plugins} plugin has been updated, #{uptodate_plugins} plugin was already up to date.#{white}" elsif updated_plugins == 1 puts "\n#{green}#{updated_plugins} plugin has been updated, #{uptodate_plugins} plugins were already up to date.#{white}" elsif uptodate_plugins == 1 puts "\n#{green}#{updated_plugins} plugins have been updated, #{uptodate_plugins} plugin was already up to date.#{white}" else puts "\n#{green}#{updated_plugins} plugins have been updated, #{uptodate_plugins} plugins were already up to date.#{white}" end Thanks for the inspiration, Joe!]]> Let's say you have a method which performs some networking actions, which can obviously fail, but in different ways: func performNetworkStuff() { fetchSomething(then: { condition in if condition { } // failure due to condition else { failureAction1() failureAction2() variable1 = value etc... } }) { // failure due to network failureAction1() failureAction2() variable1 = value etc... } } But instead, we could DRY it up a little bit, by storing the failure actions into a block beforehand: func performNetworkStuff() { let handleFailure = { failureAction1() failureAction2() variable1 = value etc... } fetchSomething(then: { condition in // success if condition { } // failure due to condition else { handleFailure() } }) { // failure due to network handleFailure() } } This way you can be sure the same code is properly run on all failures, and if something needs to be changed, it has to be changed in only one place.]]> I stumbled upon a problem recently (the full story can be found on Apple Developer Forums) and the short version is: running the app or the tests without code coverage enabled always worked fine, but turning it on caused errors to appear, breaking the build process when trying to test. Hunting attemp #75. 
One of the errors raised was Segmentation fault 11, which appeared for several Swift files during the Compile Swift source files phase. This is an error I remembered having in the early days of Swift when writing daring short syntax, like: // UILabel convenience init convenience init(text: String, font: UIFont) // UIFont helper class func boldFont(ofSize size: CGFloat) -> UIFont { return UIFont(name: "AvenirNext-Bold", size: size)! } UILabel( text: "Test", font: .boldFont(ofSize: 15) ) // This wouldn't compile, with segmentation fault 11, // and I would have to write the full thing, UIFont.boldFontOfSize(15) So I went in one of the smallest Swift files with issues, added "long" syntax everywhere, and, weirdly enough, it was still not compiling. Next, in a desperate attempt, I went ahead and stripped the code of all ternary conditions and optionals (replacing with bangs !), and it now worked. Huh? Anyway, adding back the old code, bit by bit, revealed the culprit - variables with ternary conditioned default values, like: private let leftMargin: CGFloat = isPad ? 32 : 16 // var made no difference private let leftMargin: CGFloat = CGFloat((1 == 1) ? CGFloat(32) : CGFloat(16)) // desperation moment Changing them to computed properties, or lazy variables did the trick, and now code coverage finally works: private var leftMargin: CGFloat { return isPad ? 32 : 16 } private lazy var leftMargin: CGFloat = { return isPad ? 32 : 16 }() This whole thing seems so simple, I have the feeling I'm just missing something obvious; please let me know if so.]]> In case you haven't already read them, Advanced & Practical Enum usage in Swift and Match Me if you can: Swift Pattern Matching in Detail have to be one of the best posts about Swift enums. I'd say you should really read and bookmark them.]]> You. Just a slight improvement for last post: sometimes I don't have the branch pushed on GitHub, so why not automate that too? 
perform_pull_request() { branch="development" branch_to_push="$(parse_git_branch)" git push origin "$branch_to_push":"$branch_to_push" if [ -n "$1" ]; then branch=$1 fi pr_url=$(hub pull-request -b "$branch" -m "$(formatted_git_branch)") if [ $? -eq 0 ]; then open "$pr_url" fi } I also found some nice helpers for easier color printing: // A bit redundant to have both of them, but I like having extra stuff // instead of not having something when I could use it. black=$(tput setaf 0) red=$(tput setaf 1) green=$(tput setaf 2) yellow=$(tput setaf 3) blue=$(tput setaf 4) magenta=$(tput setaf 5) cyan=$(tput setaf 6) white=$(tput setaf 7) reset_color=$(tput sgr0) black() { echo "$black$*$reset_color"; } red() { echo "$red$*$reset_color"; } green() { echo "$green$*$reset_color"; } yellow() { echo "$yellow$*$reset_color"; } blue() { echo "$blue$*$reset_color"; } magenta() { echo "$magenta$*$reset_color"; } cyan() { echo "$cyan$*$reset_color"; } white() { echo "$white$*$reset_color"; } Now I can do these: ]]>]]> red "Something went wrong." cyan "Multicolored $magenta example.$reset_color" echo "${yellow}This is a warning.$blue For reason x." I recently started using Tower, which, sadly, doesn't support opening Pull Requests. Since I was used to this after using GitUp and SourceTree, I now found the process of opening Pull Requests quite cumbersome. Here's my take on it, by making use of GitHub's hub gem and a bit of .bash_profile tinkering: # This will return a string with your current branch name. 
parse_git_branch() { git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ \1/' } # This will return a string with a friendly branch name # If you have a ticketing system, the naming convention might be # XXX-123-feature-name, so the output would be XXX-123 Feature name formatted_git_branch() { branch="$(parse_git_branch)" # Replace all '-' with spaces pr_title="${branch//-/ }" # Check if it starts with XXX, otherwise we just return the capitalized name if [[ $pr_title == XXX* ]]; then pr_title=${pr_title:0:3}-${pr_title:4} # XXX-123 test if [[ ${pr_title:7:1} == " " ]]; then fl="$(echo ${pr_title:8:1} | tr '[:lower:]' '[:upper:]')" pr_title=${pr_title:0:8}$fl${pr_title:9} else # XXX-1234 test fl="$(echo ${pr_title:9:1} | tr '[:lower:]' '[:upper:]')" pr_title=${pr_title:0:9}${fl^}${pr_title:10} fi else fl="$(echo ${pr_title:0:1} | tr '[:lower:]' '[:upper:]')" pr_title=$fl${pr_title:1} fi # Using ${var^} returns me a 'bad substitute' error, thus the echo workaround echo "$pr_title" } # By default it will use the current branch as from and the -b for the base perform_pull_request() { # Default to the development branch as base branch="development" # If we pass an argument, that will be used as the base branch if [ -n "$1" ]; then branch=$1 fi # The `hub` command outputs the Pull Request `URL` pr_url=$(hub pull-request -b "$branch" -m "$(formatted_git_branch)") # Don't try to open the URL if anything went wrong (if the PR already exists, for example) if [ $? -eq 0 ]; then open "$pr_url" fi } alias gitpr=perform_pull_request Final step is to add an Alfred workflow to easily run this from anywhere (here you can read on how that's done): cd ~/path/to/project && gitpr Now I can open a Pull Request from anywhere, defaulting to the most used base, development, but also being able to pass it in as an argument. A.]]> I). You can now search for keywords within WWDC videos. ]]>]]>. 
UIScrollViews are a different kind of beast when created with auto layout, but the basic idea is to constrain it to a fixed frame, and let the constraints of its subviews determine its contentSize. So let's start with that: private lazy var scrollView: UIScrollView = { let sv = UIScrollView(frame: CGRect.zero) self.view.addSubview(sv) sv.translatesAutoresizingMaskIntoConstraints = false sv.alignTop("0", leading: "0", bottom: "0", trailing: "0", toView: self.view) return sv }() We have a scrollView that is the same size of our view, nothing fancy. Let's say we have two layouts, one with 3 subviews, one with 2, at 200px height each, spaced out by 10px. Still nothing fancy, but the catch is that the last subview has to always be pinned to the bottom of the screen. On devices with the screen height >= 630px, this will be simple, because scrolling won't be needed - pin the bottom edge of the last subview to the view itself. What about the other devices? On the first layout, the contentSize.height of our scrollView will be 3 * 200 + 3 * 10 = 630px - when scrolled to the bottom, the last subview will be at the very bottom. All good, so far. On the second layout, the contentSize.height of our scrollView will be 2 * 200px + 2 * 10 = 420. We have to constrain the last subview's bottom edge with the scrollView's bottom edge, to determine its contentSize.height, and, sure, we could calculate the required top space on each device / layout and modify the constant accordingly, but the solution is much more simple and flexible: [...] // Add it to the scrollView, just like the rest scrollView.addSubview(lastSubview) lastSubview.alignBottomEdge(withView: scrollView, predicate: "0") // 1 [...] var lastSubviewTopConstraint: NSLayoutConstraint? var lastSubviewBottomConstraint: NSLayoutConstraint? 
if screenHeight < 630 { lastSubviewTopConstraint = lastSubview.constrainTopSpace(toView: secondSubview, predicate: "10@999") // @999 means priority 999 (we're starting in layout 1) } // Also constrain the bottom to the view itself lastSubviewBottomConstraint = lastSubview.alignBottomEdge(withView: view, predicate: "0@1") // Then just toggle their priorities: lastSubviewTopConstraint?.priority = layoutOne ? 999 : 1 // 2 lastSubviewBottomConstraint?.priority = layoutOne ? 1 : 999 // 3 Even though the scrollView is constrained to the view's height, don't be mistaken: the lastSubview won't be pinned to the bottom (1); this constraint only determines the scrollView's contentSize.height. The bottom constraint on the scrollView always determines the scrollView.contentSize.height. In addition, the top constraint (2) determines the lastSubview's position, when the high priority is set. The bottom constraint (3) on the view determines the lastSubview's position, when the high priority is set. Throughout this post I'm making use of FLKAutoLayout, a great Auto Layout library.]]> If you're using any kind of offsite assets, like webfonts or Font Awesome icons, take into consideration that these will be blocked by iOS 9's content blockers - either bring them locally on your server, either have fallbacks; I chose the former, which also improved the pages' loading times.]]> I recently bought a couple of cases for the Apple Watch and I had the option to ship via USPS or DHL, and since DHL was only $10 more expensive, I thought sure, it's worth it. Sorry for the rant post, but I'm really, really disappointed. Long story short, if there are other options, I doubt I will choose DHL again - I had a terrible customer service experience. 
The problems started with the fact that I wasn't announced when the package arrived at the customs and couldn't be moved further because they need extra info - I had to call them for 3 days in a row, and to drive to one of their offices to get the needed info. Next up, their commission to pay the customs taxes on my behalf is 3-6 times more expensive than other shipping services, and the minimum is a fixed price: my package was $30, the customs duty is somewhere around $7 and their commission is $27 - 4 times the duties and almost the package's value. Finally, the other option would have been to pay the taxes myself, but then I would have had to wait until the money actually entered the customs' account and to also prepare some extra papers, thus pushing the actual shipping time to almost 2 weeks. The process for shipping by USPS or any other postal service is dead simple: receive a notification in my mailbox that my package has arrived, go to the postal office, pay there, get my package, come back home. In the end, it wasn't about those extra $35 per se - if the shipping would have been an extra $25 and the commission $10, it would have felt more fair. I also wish they would have shown a little more care about their customer; maybe other countries have it better - and I do hope so.]]> We have in the app a generic button, styled to look the same across the app - from shadows to title. Some places require a smaller font though, and I didn't really want to duplicate code, so something had to be done. Here's my little trick for it: ]]>]]> // titleLabel is nil only for system buttons, as per documentation button.titleLabel!.font = UIFont( name: button.titleLabel!.font.fontName, size: button.titleLabel!.font.pointSize - 2 ) If you want to animate the hidden property of a view, or animate the title of a button, the solution is simple - use UIView.transitionWithView(button [...]. 
If you want to animate the title of several buttons, the solution is just as simple - use UIView.transitionWithView(containerView [...]. The problem arises when you want to animate constraint changes at the same time: they won't be animated anymore, no matter if you encapsulate them in the same transitionWithView call, a new one, or inside an animateWithDuration call. The only solution I found is to do a transitionWithView call for each button that needs its title changed, and animate the constraints as usual, inside an animateWithDuration call. These kind of changes can also be animated with a CABasicAnimation / CATransition, and adding that animation to the view's layer, but in case you need transitionWithView, or don't know why your constraints aren't animating, this might be the reason. This is one of the best articles I read about Auto Layout, and one of the best articles about a given topic, altogether. I really, really enjoyed reading it.]]> One of the best explanations about pattern matching in swift I've seen so far. ==Insta-bookmark.==]]> Even though the names might sound self-explanatory, I never really understood how they work. But I finally found a proper explanation about them: A strong hugging priority limits the view from growing much larger than the content it presents. A weak priority may allow a view to stretch and isolate its content among a sea of padding. Compression resistance describes how a view attempts to maintain its minimum intrinsic content size. I'm considering to buy the book too, while I'm at it. Update, 2015-09-16: This piece is really, really nice as well. As he points out: ]]>]]> When I first started out, I actually wish I could have had something like this guide to help me out. 
I recently saw this medium post linked on The Loop, and at first I thought "sure, this is a major issue, but, on the other hand, it seems pretty fair to be paid by the number of plays, that's what streaming means; I can't see any another way", but eventually I reached the part where he offers a solution; and I think it's perfect: The premise behind Subscriber Share is simple: the only artists that should receive your money are the artists you listen to. Subscriber Share simply divides up your $7 ($10 per month, -30% service fees)). I hope the music industry will adopt this, it just feels right.]]> This was a fun Saturday morning read. And ==very== interesting.]]> Continuing where we left off last time, is another little trick I like to use with enums: func collectionView(_ collectionView: UICollectionView, layout: UICollectionViewLayout, referenceSizeForFooterInSection section: NSInteger) -> CGSize { if .products == Section(rawValue: section), conditionOne, !conditionTwo { return CGSize(width: 10, height: 20) } else if .shippingDetails == Section(rawValue: section), anotherCondition { return CGSize(width: 10, height: 40) } return .zero } I think this makes the code easier to read at a glance: I can quickly scan the left side of the ifs until I find what I'm looking for, and after that I can check for other conditions, if any. Sure, this could have been written like: switch Section(rawValue: section)! { case .products where conditionOne, conditionTwo: return CGSize(width: 10, height: 20) case .shippingDetails where anotherCondition: return CGSize(width: 10, height: 40) default: .zero } But in case you don't want to write a switch, for whatever reason, making the if blocks look more like one might be a big plus to readability. I recently wrote that I added a visual representation for when a search is performed, by autocompleting the search field with the search query. Everything appeared to work as expected. 
Some time ago I added Tinfoil Heroku addon and never saw any security problems. I even thought I will never see one. But I started seeing a couple about Cross-Site Scripting in HTML tag. I went to their dashboard, clicked on reproduce the attack, it took me to my website, with a search performed, the search query was autocompleted, as expected, but a tag was also injected in the search field: <input type="text" class="search banner-search" name="query" size="17" maxlength="30" placeholder="Search..." value="tinfoil_yGU31" tinfoil_xss_in_tag="fa1a65120bd41529ad60271db0cef24aab4a57c3"> At first, I tried to find a solution for the ruby approach. It seemed the bug and the solution were pretty simple: <input type="text" class="search banner-search" name="query" size="20" maxlength="30" placeholder="Search..." value='<%= query_params %>'/> // Replace the ' with "?. // Try ERB::Utils.html_escape. // Try other helpers... But that didn't work out well, either, for obvious reasons. So I resorted to the jQuery approach I wrote. I also changed the split to be done by query= instead of =, and I'm pretty sure other edge cases are still possible, but I can't find a better approach for now. This is a really nice resource, I'd suggest on at least reading it, if not bookmarking. Even seasoned developers will find useful stuff inside, but if you are a junior, or have one in your team, it's definitely a must-read. 
It goes hand-in-hand with this awesome resource list.]]> This will only work for enums that are Ints, start from 0, and increment by 1 - for example tableView / collectionView sections, but I think it's a nice little trick that I always use: private enum Section: Int { case products case shippingDetails case paymentDetails case totalValue case numberOfSections } Now you can do the following: func numberOfSections(in collectionView: UICollectionView) -> Int { return Section.numberOfSections.rawValue } func collectionView(_ collectionView: UICollectionView, cellForItemAt indexPath: NSIndexPath) -> UICollectionViewCell { let cell: UICollectionViewCell // Since numberOfSections returns Section.numberOfSections, // it's safe to force unwrap here; it will never crash. switch Section(rawValue: indexPath.section)! { case .products: // [...] case .shippingDetails: // [...] case .paymentDetails: // [...] case .totalValue: // [...] // Now comes the downside: either do a default case, // or add the .numberOfSections case, both of which // do nothing, but I think it's worth the minor inconvenience. case .numberOfSections: // default: } } Then, in the future, if the requirements change, and you need the paymentDetails section to be second, you don't have to remember which index it was and make sure you change it accordingly in didSelectItemAtIndexPath, as well - just move it in front of shippingDetails inside the enum declaration. Or, if you need to add / remove a section, you don't need to remember to update numberOfSections, it will continue to work by itself. I.]]> I wanted a visual representation and helper for the user when a search is performed on the site. So I thought about autocompleting the searched term in the search field.
At first I tried with jQuery: var query = decodeURIComponent(location.search) .split("=")[1] .replace(/[\+]/g, " "); $('input.search').val(query); $('input.banner-search').val(query); But this had a small delay and the slight inconvenience of having to replace + with a space, and to decode the URL. So I ended up with some really simple ruby, inside layout.erb: ]]>]]> <% query_params = request.params['query'] %> <input type="text" [...] One more thing that helps me with the Fastlane flow is an Alfred workflow to run the lanes. It was really easy to create, and here's how: open Alfred, go to Workflows, press +, Templates, Essentials and choose the Keyword to Terminal Command. Give it a name, a description, and a keyword (without a parameter), then paste the following in the script field: cd ~/path/to/project && fastlane release_minor && exit This will open the Terminal, cd to that path, run the fastlane command, then close the respective window. Update, July 16, 2015: Fastlane got a nice update, where you can pass parameteres from the command line, so I improved the Alfred workflow - I set it to require a parameter, with space, then changed the terminal command to: cd ~/path/to/project && fastlane release scheme:{query} && exit Now I can write in Alfred release minor, as if I wrote in terminal fastlane release scheme:minor, and Faslane does the same thing as it did before with fastlane release_minor. ==Nice.==]]> Let's say we have a textField with an UIPickerView as its inputView. The first problem is that we want the textField to not be editable, because we populate it with the value picked from the picker. This is the simple part: func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool { return emailTextField == textField } The second problem is that the textField is still tappable, it still gets focus and the caret is still visible. 
We can't return true on shouldBeginEditing, we can't disable interactions, and resigningFirstResponder doesn't help either, since it closes the picker, and the whole idea is to use the picker natively, without creating a new set of methods that showPicker and hidePicker. Since a textField's tintColor affects its caret too, the solution for our second problem turned out simple, after all: Bonus tip: private var email = "" { didSet { emailTextField.text = oldValue } } This will automatically update the textField whenever This is the last post about how we use fastlane, and it will present the Deliverfile. default_language "de-DE" email itc-username automatic_release false skip_pdf true hide_transporter_output screenshots_path "../../../../Google Drive/iTunes Assets/images" if ENV["VERSION_NUMBER"] version ENV["VERSION_NUMBER"] end changelog( "en-US" => File.read("../../../../Google Drive/iTunes Assets/changelog/en.txt"), "de-DE" => File.read("../../../../Google Drive/iTunes Assets/changelog/de.txt"), "fr-FR" => File.read("../../../../Google Drive/iTunes Assets/changelog/fr.txt"), "it-IT" => File.read("../../../../Google Drive/iTunes Assets/changelog/it.txt") ) success do system("say 'Successfully submitted a new version.'") end error do |information| # custom exception handling here raise "Something went wrong: #{information['error']}" end Pretty straightforward: set the required variables (this is where ENV["VERSION_NUMBER"] from last time came in handy), while the screenshots and the changelog are in Google Drive, where the guys from the Product and / or Design team can modify if needed. I just need the ok, we can release now after the files have been updated and fastlane lane does the rest. ==Awesome==.]]>. 
One of the problems I had with Fastlane's new increment_version_number and increment_build_number is that they use
Let's continue with better lanes: # From lane :release do build_app 'Release' end # To lane :release_patch do # | :release_minor | :release_patch | :release_quick_fix release :major # | :minor | :patch | :quick_fix end # While the Hockey lanes changed to lane :hockey_debug do lane :hockey_beta do build_app 'Debug' # | 'Beta' end Where the release(version) and the submit(binary_flag) methods looks like this: def release(version) # If we are submitting a quick fix, we don't really care what branch we are on # as long as it contains the quick-fix text, to be sure we are where we want to be if version == :release_quick_fix # If current_branch doesn't include 'quick-fix', ensure to something that will surely fail ensure_git_branch branch: 'quick-fix' unless @current_branch.include? 'quick-fix' else # This is a fastlane helper that checks if we are on the required branch ensure_git_branch branch: 'development' end # This will increment the version, based on the kind of release increment_version version build_app 'Release' submit :with_binary end def submit(binary_flag) deliver( skip_deploy: binary_flag == :without_binary, metadata_only: binary_flag == :without_binary ) end As you can see, all the logic was mitigated into 3, more concise, methods, build_app, submit and release. Now for the increment_version method: def increment_version(type) # In case the lane fails, we revert the version number @previous_version = version_number split = version_number.split '.' if type == :major split[0] = "#{split[0].to_i + 1}" split[1] = '0' split[2] = '0' elsif type == :minor split[1] = "#{split[1].to_i + 1}" split[2] = '0' else # For patch and quick_fix split[2] = "#{split[2].to_i + 1}" end new_version_number = split.join '.' 
# Using this in the Deliverfile ENV['VERSION_NUMBER'] = new_version_number Shenzhen::PlistBuddy.set plist, 'CFBundleShortVersionString', new_version_number end def build_number "#{Shenzhen::PlistBuddy.print(plist, 'CFBundleVersion')}" end Next time I will talk about tagging, committing, pull requesting or pushing and sending a message to Slack.]]> I previously wrote that I can't wait to start using Fastlane full-time, but I never really got to it, since I lost about a day or so and didn't manage to get the provisioning and signing to work. Since then quite a few updates were issued and things got easier, so I finally managed to make the switch. I will walk through the process in a few posts. First things first, our release process is more or less release to Hockey with a debug build until a desired state has been reached, release again to Hockey with a release build and make sure everything is working properly, then release to the App Store. desc "Deploy a new major version to the App Store" lane :release do [..] end desc "Deploy a new version to Hockey with debug / release settings" lane :hockey_debug / :hockey_release do [..] end [...] etc Most of the code inside the lanes are the same, differences being the type of the release, the branch check, the slack output message and other minor things. So instead of duplicating code over and over, all lanes call the same build_app(type) method: lane :release do build_app 'Release' end lane :hockey_debug / :hockey_release do build_app 'Debug' / 'Beta' end [...] 
etc And the build_app(type) method looks like this: def build_app(type) cert( development: type == 'Debug', username: dev_username, team_id: team_id ) pem( development: type == 'Debug', generate_p12: true, app_identifier: app_identifier, username: dev_username, team_id: team_id, p12_password: "xxx", pem_name: "aps_#{type}", save_private_key: true ) sigh_params = { app_identifier: app_identifier, username: dev_username, team_id: team_id, team_name: team_name, output_path: "./fastlane/profiles", filename: "#{type}.mobileprovision" } if type == 'Beta' sigh_params[:adhoc] = true elsif type == 'Debug' sigh_params[:development] = true end sigh sigh_params ipa_params = { workspace: "xxx.xcworkspace", configuration: "#{type}", scheme: "#{type}", # the auto-increment build number script is at the bottom. # Because archiving builds again, that would mean # the build number would be increased twice. We don't really need the archive anyway. archive: false, sdk: "iphoneos", clean: true } # Special case, because setting the signing to Automatic in Xcode # uses the Release profile, but we want the AdHoc profile. if type == 'Beta' ipa_params[:embed] = "./fastlane/profiles/adhoc.mobileprovision" end ipa ipa_params hockey_params = { api_token: "xxx", notify: (type == "Release") ? "0" : "1", # 0 = Do not notify, 1 = Notify status: (type == "Release") ? 
"1" : "2", # 1 = Download restricted, 2 = Downloadable release_type: "0", # Beta, Store creates a new app alltogether dsym: Actions.lane_context[Actions::SharedValues::DSYM_OUTPUT_PATH] } if type != 'Release' hockey_params[:tags] = 'alpha' end hockey hockey_params end Pretty straighforward and easy to set up: use cert to check and refresh the certificates, pem to check and refresh the push certificates (it lets you know if you have over 30 days left on them, or not), sigh to refresh and download the provisioning profiles, ipa to build the .ipa file (it uses the adhoc profile for the Beta builds) and, finally hockey to upload the build to Hockey, with the required settings. The script to increment the build number, as promised - go to your Build Phases tab, click on Editor -> Add Build Phase -> Add Run Script Build Phase and paste this in there: buildNumber=$(/usr/libexec/PlistBuddy -c "Print CFBundleVersion" "$INFOPLIST_FILE") buildNumber=$(($buildNumber + 1)) /usr/libexec/PlistBuddy -c "Set :CFBundleVersion $buildNumber" "$INFOPLIST_FILE" Next post I will talk about adding major / minor / patch release types and editing the plist to auto-increase the version number.]]> I found krakendev's idea quite interesting. There are two downsides: ERRORs, even in this case: NSLog(@"ERROR: Reason here"); Sadly, I'm not good with bash, so I don't really know how to improve these downsides, but you might find it helpful, nonetheless. Keeping it for future reference, as well.]]> I.]]> When I wrote about iCloud sync, I said the sync logic is the shittiest ever, so this is the first attempt at improving it. Firstly, the migrateOldData got an update: let event = EPEvent( id: Events.last?.id ?? 0 + 1, ... ) EPEvent.data.append(event) init(...) { modificationTime = Date.timeIntervalSinceReferenceDate() } modifyEventInAnyWay() { modificationTime = Date.timeIntervalSinceReferenceDate() } Then the sync. 
]]>]]> for iCloudEvent in document.data { var addEvent = true for localEvent in Events where localEvent.id == iCloudEvent.id { if localEvent.lastModification < iCloudEvent.lastModification { // Update local events. } // We use an inner if block instead of adding it in the where statement, // because if iCloud modification is older than local, // we don't want to add a new event, just not update the local one. addEvent = false } // Add new event. if addEvent { Events.append(iCloudEvent) } I saw this little thing today, by the guys from thoughtbot. sh$ gitsh then simply type commands without git: status add . commit -m "Message" push Be sure to check their newest iOS open source, as well, Tropos.]]> I already love Swift 2.0. First, defer, to never forget to enableUpdates for the query: query.disableUpdates() defer { query.enableUpdates() } // Do stuff. Secondly, for x in y where condition loops: ]]>]]> for iCloudEvent in document.data { // I know, I know, worst sync ever; I will try to improve it at some point. for localEvent in Events where localEvent.name == iCloudEvent.name { // Update local events. } // Append to local events. } Here is another nice read about protocols; method overriding, in particular. Some examples: ]]>]]> // Our generic class. class DBStore<T> { func store(a: T) { store T } } protocol Storeable { } protocol Interim { } // Hello, generic Method Overloading by Protocol: class DBStore<T> { func store<T: Storeable>(a: T) { // Store T. } func store<T: Interim>(a: T) { // Compress T. } // And more advanced: func store<T: Storeable>(a: T) where T: Equatable { // Store T. } } While.]]> First things first, I want to migrate old users' data to Swift objects: // Didn't want to use CoreData just for one object, so I was creating an array of arrays: // array[0] = recurring time, array[2] = name, etc. 
// Don't laugh, it was a long time ago :) unarchiver = [[NSKeyedUnarchiver alloc] initForReadingWithData: dat]; kConstants.allTheData = [unarchiver decodeObjectForKey: @"dic"]; [unarchiver finishDecoding]; to: let unarchiver = NSKeyedUnarchiver(forReadingWithData: decryptedData) if let events = unarchiver.decodeObject(forKey: "data") as? [EPEvent] { EPEvent.data = events } else if let oldData = unarchiver.decodeObject(forKey: "dic") as? [Any] { Utils.migrate(oldData) } else { EPEvent.data = [] } unarchiver.finishDecoding() where migrateOldData is: // The fallbacks should never happen, but it doesn't hurt to be extra sure. for event: [Any] in data { let event = EPEvent( recurring: event[0] as? String ?? EPEvent.recurring, sum: event[1] as? Float ?? 0.0, name: event[2] as? String ?? "Invalid data", eventComponents: event[3] as? DateComponents ?? EPEvent.alertTime, reminderComponents: event[4] as? DateComponents ?? EPEvent.alertTime, fireTime: event[5] as? String ?? "Day of event", repeats: event[6] as? String ?? "None" ) EPEvent.data.append(event) } saveData() // This just writes to file, encrypted Trying it out: ]]>]]> print(EPEvent.data.first?.recurring) // -- Optional("Daily") I started doing it at some point, but never got through with it, but with the release of Swift 2.0, there's no better chance to try all the new things and come out with a better app as well. I will be writing my journey, which won't be too long, since the app is pretty simple, but I can see quite a few hurdles ahead. I will prefix post titles with [EP] when writing about it. Stay tuned. Super Fast Soft Shadow system for Unity: ]]>]]> We're the authors of Chipmunk2D physics engine and Cocos2D-SpriteBuilder maintainers. We recently rewrote the Cocos2D renderer and added lighting effects, and now we're back to writing Unity plugins! [...] Shadow mask generation occurs in a single pass - it doesn't use expensive image filters or pixel shaders to soften the shadows. 
This means it runs great on mobile! [...] Physically realistic penumbra, umbra, and antumbra rendering is based on configurable light sizes. This produces accurate rendering when the light source is larger than the objects casting the shadows. Found this gem on Medium and bookmarked it. I think you should too.]]> This is really, really awesome. Run your app and press the record button (next to the enable/disable breakpoints): as you navigate your app, code will be automatically generated. A quick test for LTHMonthYearPickerView: let app = XCUIApplication() let toolbar = app.toolbars app.pickerWheels["2015, 46 of 91"].swipeUp() toolbar.buttons["Done"].tap() let textField = app.children(matching: .window) .element(boundBy: 0) .children(matching: .textField) .element(boundBy: 0) XCTAssertTrue(textField.value as? String == "June / 2034") XCTAssertFalse(textField.value as? String == "June / 2033") The value property is, as described in the documentation: ]]>]]> The raw value attribute of the element. Depending on the element, the actual type can vary. I won't go into details, because others have already done that, and better than I would, so I will just share some links: Ash Furrow, Ray Wenderlich, The Next Web. Also, Apple's Sample Code collection is pretty huge this year. Here's a really small sneak peek by @AirspeedVelocity: ]]>]]> Woo, finddid get a version that takes a closure (and got renamed to indexOf). This makes usage of UIView.animateWithDuration:animations so much more appealing. Some examples from the repo's readme: // Also works with spring animations: UIView.animate(duration: 2.0, delay: 2.0, options: [.repeat, .autoreverse, .curveEaseOut], animations: { self.view.layer.position.x += 200.0 self.view.layer.cornerRadius = 20.0 self.view.layer.borderWidth = 5.0 }, completion: nil) // Also works with chains! 
UIView.animateAndChain(duration: 1.0, delay: 0.0, options: nil, animations: { self.view.center.y += 100 }, completion: nil).animate(duration: 1.0, animations: { self.view.center.x += 100 }).animate(duration: 1.0, animations: { self.view.center.y -= 100 }).animate(duration: 1.0, animations: { self.view.center.x -= 100 }) // A nice addition is canceling chains: let chain = UIView.animateAndChain(duration: 1.0, delay: 0.0, options: nil, animations: { self.square.center.y += 100 }, completion: nil).animate(duration: 1.0, animations: { ... } [...] // Will stop after finishing the current step in the chain. chain.cancelAnimationChain() Install via cocoaPods with pod 'EasyAnimation'. I -%> When I wrote about improving the images, one of the code blocks was for CSS, and two were for jQuery. Since it was more or less the first time I wrote jQuery, I thought it might have extra syntax over js, so I set the block's language to jQuery. Sadly, the pygmentation gem was returning an error of unknown value "jQuery", or something like that. Silly me for not testing locally, but it would have been much more elegant for the gem to not colorize anything in that block, instead of returning a 500 if the language passed in is invalid. Until now I used pygments gem. I'm pretty sure it would have been just as good to keep using it, but I switched to rouge: class HTML < Redcarpet::Render::HTML include Rouge::Plugins::Redcarpet def block_code(code, language) Rouge.highlight(code, language || 'text', 'html') end end Then I swapped my pygments.sass file to pygments.scss and filled it with the theme from, since I quite like it, and that was it. I'm pretty sure I will tweak the theme in the near future, but I'm happy for now. There's also this little helper rougify style monokai.sublime > syntax.css. 
In the feed I had this check, the same for displaying posts: last_updated = time_from_string(post[:datetime]) # Skip post if date is invalid or in the future next if last_updated == nil || DateTime.now.to_time < last_updated which worked properly for the site, but for the feed I was getting this error: undefined method `w3cdtf' for class `ActiveSupport::TimeWithZone` which was happening when I was setting the item.updated field to last_updated. The fix was pretty easy (and obvious, now, after I fixed it) - just convert the returned object to_time: ]]>]]> def time_from_string(string) [...] DateTime.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i, time[:hour], time[:min], 0, time[:zone]).in_time_zone.to_time end I have on site several kinds of images, but since I write my posts in Markdown, and I want to make use of its native features, the images created this way have no class. So I had to find a way to style these imgs. First step, CSS: img { border-radius: 2px; -webkit-box-shadow: #777 0 0 3px 1px; -moz-box-shadow: #444 0 0 3px 1px; /* color, h-offset, v-offset, blur, spread */ box-shadow: #777 0 0 7px 0; /* inset is optional */ // This will center the images inside posts. &:not([class]) { margin: 0 auto; display: table; } } But the problem here is with images that are wider than the article's width. Second step, jQuery: $('img:not([class])').on('load',function() { if (img.width() > $('section').width()) { img.wrap("<div class='centered-image-wrapper'></div>"); } }); Where centered-image-wrapper has the same border, border-radius, and shadow as img, with overflow: hidden; and text-align: center; added. But now, on resize the images will remain unwrapped, since the condition was made on load. 
Step three: ]]>]]> $(document).ready(function() { $(window).resize(function() { resizeImage($('img:not([class])')) }); $('img:not([class])').on('load',function() { resizeImage($(this)) }); }); var resizeImage = function(img) { if (img.width() > $('section').width()) { if (!img.closest('.centered-image-wrapper').length) { img.wrap("<div class='centered-image-wrapper'></div>"); } } else if (img.closest('.centered-image-wrapper').length) { img.unwrap() } }; Previously I said I had to scan the hrefs for my search-mark spans, but I forgot my assets: content.scan(/\/assets\/.*?"/).each do |s| edited_link = s.gsub('<mark class=\'search\'>', '') edited_link.gsub!('</mark>', '') content.gsub!(s, edited_link) end Sadly, I didn't find a proper way to skip URLs, so I have to scan several times to remove search-mark from them :( I. ]]> Improvement #1: adding the current branch name after the current path. Add this in your .bash_profile file: parse_git_branch() { git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/' } export CLICOLOR=1 export LSCOLORS=GxFxCxDxBxegedabagaced export PS1='\[\e[0;32m\]\u\[\e[0m\]:\[\e[0;33m\]\w\[\e[0m\]\[\e[0;35m\]$(parse_git_branch)\[\e[0m\]$ ' And the outcome is user:path (branch_name) $. The user is green, the path is yellow, the branch_name is purple and everything else is white. Improvement #2: autocompletion for branch names. Again, add this in your .bash_profile file: if [ -f ~/.git-completion.bash ]; then . ~/.git-completion.bash fi I find these pretty useful. Actually, I find them indispensable after a prolonged use. Don't forget to tweak the colors to your own preferences.]]> First of all, I changed the <search-mark> tag to <span class='search-mark'>. Secondly, I'm properly adding the search-mark to all occurrences of the searched term, instead of only the first (silly me). 
Lastly, I search the content again to remove the search-mark from inside hrefs: ]]>]]> start_index = content.downcase.index(w) end_index = start_index + w.length original_occurrence = content[start_index..end_index - 1] content.gsub!(/#{original_occurrence}/i, '<span class=\'search-mark\'>\0</span>') content.scan(/href=".*?"/).each do |s| edited_link = s.gsub('<span class=\'search-mark\'>', '') edited_link.gsub!('</span>', '') content.gsub!(s, edited_link) end Sometimes you need to find out what called a method. StackOverflow helped me find an answer: public func printCallingFunction() { let syms = Thread.callStackSymbols() if !syms.isEmpty { var sourceString = syms[2] as? String ?? "" var separatorSet = CharacterSet(characters: " -[]+?.,") var array = sourceString .componentsSeparated(by: separatorSet) .filter() { !$0.isEmpty } } // 3 is the class, 4 is the method. print("--- \(array[3]) - \(array[4])"); } There's also this approach, but I don't really like it: ]]>]]> #include <execinfo.h> void *addr[2]; int nframes = backtrace(addr, sizeof(addr)/sizeof(*addr)); if (nframes > 1) { char **syms = backtrace_symbols(addr, nframes); NSLog(@"%s: caller: %s", __func__, syms[1]); free(syms); } else { NSLog(@"%s: *** Failed to generate backtrace.", __func__); } Since creating formatters is very expensive, it's a good idea to have a singleton for them. But what if you have a standard style throughout the app, except a couple of places? 
Encapsulate it in a Static struct and set the required settings before returning: struct Utils { class var numberFormatter: NumberFormatter { struct Static { static let formatter = NumberFormatter() } Static.formatter.maximumFractionDigits = 2 Static.formatter.alwaysShowsDecimalSeparator = false return Static.formatter } } Then in the places with a different style: let x = Utils.numberFormatter x.maximumFractionDigits = 3 x.alwaysShowsDecimalSeparator = true print(x.maximumFractionDigits) // => 3 print(x.alwaysShowsDecimalSeparator) // => true But throughout the app the "default" settings will be returned: let y = Utils.numberFormatter print(y.maximumFractionDigits) // => 2 print(y.alwaysShowsDecimalSeparator) // => false Just be careful with this approach - if you allocate y between allocating and using x, you will get the default settings for both x and y: ]]>]]> let x = Utils.numberFormatter x.maximumFractionDigits = 3 x.alwaysShowsDecimalSeparator = true let y = Utils.numberFormatter print(x.maximumFractionDigits) // => 2 print(x.alwaysShowsDecimalSeparator) // => false Now it seems obvious, and I feel silly about it, but setting a view's autoresizingMask's before setting its frame or bounds, makes the resizing not work properly. In a recent change I tried to handle the time zone like this: def time_from_string(string) date_matches = string.match(/(\d{4})-(\d{2})-(\d{2})-(\d{4})/) Time.zone = 'Bucharest' time_zone = Time.zone.formatted_offset # A little hack to account for daylight savings of when the post was created time_zone = '+03:00']).in_time_zone end But that means it will create a time object based on the current time zone, which is not correct; the post was created in a different time zone. So the comparison with the current date will always be off. 
So I added the sinatra-activerecord gem and came up with this solution: def time_from_string(string) date_matches = string.match(/(\d{4})-(\d{2})-(\d{2})-(\d{4})/) # I always want to link the post creation date to my local time, no matter if I wrote the post on a trip. time_zone = 'EET' # It returns true if the date was during dst interval (which differs from year to year) time_zone = 'EEST']).to_time end Update, May 18, 2015: Apparently I had to set the timezone for heroku, as well - heroku config:add TZ=Europe/Bucharest. This finally did the trick. I remembered I have a Hazel rule that syncs the files automatically when a new file is added. So I realised I can improve the future posts even further: instead of excluding files, it's better to exclude posts, based on date: ]]>]]> get '/' do [...] all_posts.reject! do |post| time_from_string(post[:datetime]) == nil || DateTime.now.to_time < time_from_string(post[:datetime]) end [...] end get '/feed' do [...] posts.each do |post| next if time_from_string(post[:datetime]) == nil || DateTime.now.to_time < time_from_string(post[:datetime]) [...] end [...] end Sometimes I have an idea to write about, but I don't really want to post it now, since I want to spread the posts out, so I will just prepare it for future posting. The problem is that right now the sync command tries to add all files, so I came up with a way to only parse files with a date in the past: ]]>]]> def time_from_string(string) date_matches = string.match(/(\d{4})-(\d{2})-(\d{2})-(\d{4})/) time_zone = Time.now.getlocal.zone time = Date._strptime("#{date_matches[4]} #{time_zone}", '%H%M %Z') DateTime.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i, time[:hour], time[:min], 0, time[:zone]).to_time end get '/sync_command' do [...] client_metadata.each do |file| next if DateTime.now.to_time < time_from_string(datetime) [...] 
end I read this a while ago, and I just love doing this: view.backgroundColor = .red view.backgroundColor = .custom formatter.locale = .autoupdatingCurrent label.font = .withSize(15) label.font = .customFont(ofSize: 15) If only the compiler would offer proper autocomplete ... But, hey, at least it's faster and/or easier sometimes.]]> Build, archive, submit to iTunes Connect, HockeyApp or similar services from the command line. I was skeptical at first, but now I kinda look forward to using it. Took me a while to set it up properly, due to poorly configured openssl, mostly, but otherwise it's pretty straightforward. You can get it here. Felix is an awesome guy too - I had a problem and he managed to reply faster than I came back from lunch 😊.]]> Since I don't want everybody to run this command, although it doesn't really harm in any way, I added a parameter to act as a private key: get '/cmd.Dropbox.Sync/:key/?:with_delete?' do redirect_to_404 unless params[:key] == MY_SYNC_KEY The with_delete parameter lets the sync posts command know if to check for deleted files or not. Why this approach? Because the support for multiple posts with same title implies iterating over all posts and on each iteration to iterate over all file names, to make sure. all_posts.each do |post| delete = true client_metadata.each do |file| # Same code as in previous post, by adding a --i suffix # Otherwise, when adding a new post with an existing title would be immediately deleted from the database, # since link--1 != link, even though title == title end delete = false if link == post.link It already takes quite a few seconds at only 50 posts, so I'd rather not do that all the time, especially since I won't be deleting posts, unless exceptional occasions.]]> I Oh man, I can't believe it's been almost a year since I've written anything. But it's been an amazing year! 
I will start with Polychromatic, an Xcode theme that takes a different approach to syntax coloring: emphasize variables (only local ones for Swift), arguments, macros and statics by only coloring these, with a dynamically picked color. I was skeptical at first (I quickly uninstalled it the first time I tried it, a couple of months ago) but I feel that my eyes are a bit more rested, due to less color and I can spot important information more easily.]]> Technically, it's not a first impression, because the first one was "ugh". But then I read the book and watched all the WWDC videos. Oh boy! Don't even know where to start, but man, oh man, some stuff is just mind-blowing. Here are a few examples of Operator Overload combined with Generics. // "test" * 2 func * (left: String, right: Int) -> String { var newString = "" right.times { newString += String(left) } return newString } // testtest // "abcdef"[2] subscript(digitIndex: Int) -> Character? { var i = 0 for char in self { if i == digitIndex { return char } i++ } return nil } // c // [1, 2, 3] << 4 func <<<T> (inout left: T[], right: T) { left.append(right) } // [1, 2, 3, 4] // view << newSubview func <<<T: UIView> (inout left: T, right: T) { left.addSubview(right) } // emptyDic = ["k1": "v1", "k2": "v2"] + ["k3": "v3"] func +<K, V> (left: Dictionary<K, V>, right: Dictionary<K, V>) -> Dictionary<K, V> { var l = left for (k, v) in right { l[k] = v } return l } // emptyDic = ["k1": "v1", "k2": "v2", "k3": "v3"] // emptyDic << ["k4": "v4"] func <<<K, V> (inout left: Dictionary<K, V>, right: Dictionary<K, V>) { left += right } // emptyDic = ["k1": "v1", "k2": "v2", "k3": "v3", "k4": "v4"] As for iOS 8 goodies ... Check NSHispter's latest post.]]> This is the second project I've been working on in the past year or so. It's your fancy and fun way to learn about the national anthems and cultures of many different countries. 
This is the special World Cup 2014 Edition, featuring all 32 participating nations under one theatrical, vaudeville roof. Features: Come hear our puppets sing and juggle simultaneously and learn some cool facts at the same time about your own and others' culture. Soon you’ll be singing along too!]]> For the past 6 months I've been working with the guys at DeinDeal on their native iOS app. During this time I worked with some great, creative and really smart people, and I've had the chance to learn a lot. This is the result, of which I'm quite proud.]]> You!==]]> For my first 2 games I had a project for the Pro version and another project for the Free version, and, as everyone knows, that's a huge pain to maintain. So, at first I created this: #define kFreeVersion ([[[NSBundle mainBundle] infoDictionary]\ [@"CFBundleIdentifier"] isEqualToString:@"com.rolandleth.appnamefree"]) and added a warning in the App Delegate: #warning Reminder: Don't forget to check this on every release! if (kFreeVersion) { // do stuff for the free version } But this implied, as the #warning suggests, to never forget about manually changing the bundle ID at every release. So searching I went and I found this, then this. Pretty basic stuff, which I never thought about. I combined the 2 approaches, and now I have 2 targets, but use the above macro instead because I think #ifdefs make the code look ugly. Combined with this script (I have Run script only when installing checked): VERSIONNUM=$(/usr/libexec/PlistBuddy -c "Print CFBundleShortVersionString" "$INFOPLIST_FILE") NEWSUBVERSION2=`echo $VERSIONNUM | awk -F "." '{print $2}'` NEWSUBVERSION3=`echo $VERSIONNUM | awk -F "." '{print $3}'` NEWSUBVERSION3=$(($NEWSUBVERSION3 + 1)) NEWVERSIONSTRING=`echo $VERSIONNUM | awk -F "." 
'{print $1 ".'$NEWSUBVERSION2'" ".'$NEWSUBVERSION3'" }'` /usr/libexec/PlistBuddy -c "Set :CFBundleShortVersionString $NEWVERSIONSTRING" "$INFOPLIST_FILE" makes releasing 2 versions of an app much easier than what I was used to. Edit, 10 minutes later: Well, I had to switch to the #ifdef approach, because I wanted to remove all the ad related frameworks from the paid version, so I don't have any troubles with the approval. Oh well... This has to be the most awesome webpage and one of the most impressive pieces of software I've ever seen. Yes, the routes are real and they update in real-time.]]> Simple to use UIPickerView for those pesky Credit Card expiration dates, or any other mm-yyyy need you might have. Comes with a slew of delegate methods and access to the currently selected month and year: - (void)pickerDidSelectRow:(NSInteger)row inComponent:(NSInteger)component; - (void)pickerDidSelectMonth:(NSString *)month; - (void)pickerDidSelectYear:(NSString *)year; - (void)pickerDidSelectMonth:(NSString *)month andYear:(NSString *)year; - (void)pickerDidPressDoneWithMonth:(NSString *)month andYear:(NSString *)year; - (void)pickerDidPressCancel; - (void)pickerDidPressCancelWithInitialValues:(NSDictionary *)initialValues; @property (nonatomic, strong) NSString *year; @property (nonatomic, strong) NSString *month; It's easy to init: /** * Month / Year picker view, for those pesky Credit Card expiration dates and alike. * * @param date set to a date if you want the picker to be initialized with a specific month and year, otherwise it is initialized with the current month and year. * @param shortMonths set to YES if you want months to be returned as Jan, Feb, etc, set to NO if you want months to be returned as January, February, etc. * @param numberedMonths set to YES if you want months to be returned as 01, 02, etc. This takes precedence over shortMonths if set to YES. * @param showToolbar set to YES if you want the picker to have a Cancel/Done toolbar. 
* * @return a container view which contains the UIPicker and toolbar */ - (id)initWithDate:(NSDate *)date shortMonths:(BOOL)shortMonths numberedMonths:(BOOL)numberedMonths andToolbar:(BOOL)showToolbar; And that's about it. You can grab it here; also available in pod LTHMonthYearPickerView form. From.]]> Speechless.]]> I have encountered lately so many services that do not include an unsubscribe link in their newsletter, it makes me wonder if this is a new thing. If you send out newsletters or "important messages" to a mailing list, but do not include an unsubscribe link, you have zero respect for your users. I'm honestly thinking of creating a section in here to keep track of all these guys, just to let people know who to avoid.]]> Does Exactly What It Says This app works exactly as it says it does. After the most recent update I adjusted the sensitivity conrol and it works perfectly! His rating? 2 stars.]]> It's been a while since I've posted, but here's something I've been working on lately for Expenses Planner: Also, a lot of improvements have been made to LTHPasscodeViewController, for those of you that are interested. Carminder is next, basically with the same improvements. Stay tuned.]]> This has to be one of the best online regex tools. If you even rarely work with regular expressions, you have to at least have a look at this. The visualizer and the debugger are amazing. If you're serious about regex, you can go ahead and create an account for extra features like being able to save more than 2 unit tests, public expressions, unit testing and more. Here is a quick tutorial from their team; it's a bit outdated, but it just differs visually, the functionality is basically the same. You can easily make an idea of how it works by watching it.]]> This looks amazing. I'm not even working with CSS for that long and I still find it amazing. Can't wait for it to go mainstream. 
==It will be awesome.== via Monday by Noon]]> Well,: Wouldn't I have went free with IAP, my sales would have probably stayed at $1.99 every other day, far from a decent income. And there was no real middle ground either, since Apple does not give us, developers, a way to offer discount or upgrade pricing. Also, please understand, a developer can't afford to invest time adding more and more features indefinitely, for free. When you, the user, make a purchase, you pay for the app ==in that state== (and future bug fixes), not for the app ==and whatever it will become, until the end of time==. Take Expenses Planner, for example: I added quite a few powerful features and a lot of UI work was made to fit iOS 7, and while, yes, old users paid for the old set of features, for the new ones and the UI overhaul, no one did. Choosing to add IAP(s) for (each of) these new features instead of going free with one IAP, would have been really greedy towards new users, wouldn't it? To create a separate "2.0" app wouldn't have been a better solution either, because it would have hurt old users just as much, not to mention reaching out to let them know of the new app would have been nigh impossible. So this was my middle ground: add enough new features on top of the UI overhaul so the new pricing model won't feel greedy for neither the old, nor the new users. As an old user, you can think of it like this, if you will: I paid/will pay $1.99 for the new UI, iCloud, export (plus Passcode and Recurring expenses for Expenses Planner); sounds quite fair to me, especially given the circumstances. On better news: Apple has provided us a way to check when a particular purchase was made, be it IAP or app. This means that from now on we can check who purchased the app (or IAP) before a certain date (when it went free, for example), then take that into consideration when granting features. Awesome change for both developers and users.]]> Yay! 
A lot of changes on top of the UI facelift, like iCloud sync, export to plain or CSV, Passcode lock and many other minor improvements. Grab them from the App Store now: Carminder and Expenses Planner.]]> Update, 29 May 2014: Check the 3.0.0 version here. I haven't found any proper Passcode classes for iOS 7, so I gave my best at creating one: a simple to use, iOS 7 style (replica, if you will) Passcode view controller (not the Lock Screen one, but the one you get when changing your passcode in Settings). Can be used for iOS 6 too, albeit it might need a few modifications, haven't tested it. I made it a singleton because if the lock is active, when leaving the app a view must be placed on top, so no data from within the app can be seen in the multitasking mode. This is done under the hood, without having to do anything extra. - (void)showForEnablingPasscodeInViewController:(UIViewController *)viewController; - (void)showForChangingPasscodeInViewController:(UIViewController *)viewController; - (void)showForTurningOffPasscodeInViewController:(UIViewController *)viewController; // Example: [[LTHPasscodeViewController sharedUser] showForEnablingPasscodeInViewController: self] - (void)showLockscreen; // Example: [[LTHPasscodeViewController sharedUser] showLockscreen] // Displayed with a slide up animation, which, combined with // the keyboard sliding down animation, creates an "unlocking" impression. Everything is easily customisable with macros and static constants. Makes use of SFHFKeyChainUtils to save the passcode in the Keychain. I know he dropped support for it, but I updated it for ARC 2 years ago (with help) and I kept using it since. The 'new' version isn't updated to ARC anyway, so I saw no reason to switch to it. Rather than writing a big documentation, I heavily commented it as best as I could. Here's the repo and here are two screenshots: Just revised Carminder and Expenses Planner landing pages to reflect the updated, iOS 7 versions. 
Waiting For Review is going to feel like an eternity.]]> One of the biggest reasons I was pondering to redirect my search to Google's site:rolandleth.com was their bolding of the searched terms. But here's my take on this, keeping the search local: <%- posts.each do |post| date_matches = post[:datetime].match(/(\d{4})-(\d{2})-(\d{2})-(\d{4})/) date = Date.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i).to_time.utc.to_i content = _markdown(post[:body].lines[2..-1].join) title = post[:title] if defined?(search_terms) search_terms.each do |w| if content.downcase.index(w) # Saving the original occurrence of the searched term, since the search is done # by ignoring case (downcasing both the search term and the body/title) start_index = content.downcase.index(w) end_index = start_index + w.length original_occurrence = content[start_index..end_index - 1] # Then wrapping the original occurrence between a custom tag. content.gsub!(original_occurrence, "<search-mark>#{original_occurrence}</search-mark>") end if title.downcase.index(w) start_index = title.downcase.index(w) end_index = start_index + w.length original_occurrence = title[start_index..end_index - 1] title.gsub!(original_occurrence, "<search-mark>#{original_occurrence}</search-mark>") end end end -%> <%= erb :post, locals: { date: date, title: title, content: content, link: post[:link]} -%> <%- end -%> And here's the CSS for the search-mark tag: search-mark { border-radius: 2px; background-color: #fff0f4; font-style: normal; } ==Easy, peasy!==]]> Well, it's done. Thanks to the desire to publish when mobile, I had to implement some sort of Dropbox sync, which forced me to implement a database for a better user experience than handling files directly, which, lastly, gave me the opportunity to implement search: a feature that was ==a lot== easier to implement than I've expected: 30 lines of code. 
The search form: <form class="search" action="/search" method="get"> <input type="text" class="search" name="query" size="15" maxlength="20" placeholder="Search..."/> </form> Sinatra's route block: get %r{^/search$} do query = request.env['rack.request.query_hash']['query'] query_array = [] # Store all search terms in an array. If the search was done using "", # add the whole search query as the single element in the array. if query[0,1] == '"' and query[-1,1] == '"' query_array << query.gsub('"', '').downcase else query_array = query.downcase.split(' ') end # This returns an array of structures containing all the posts' data. all_posts = repository(:default).adapter.select('SELECT * FROM application_posts') # Convert the structures to hashes. all_posts.map! { |struc| struc.to_h} # Sort the array by the datetime field of our hashes. Newest to oldest. all_posts.sort! { |a, b| a[:datetime] <=> b[:datetime]}.reverse! # This is the core and the most beautiful part. # Could have been a one-liner too, but I wanted it to be more readable. all_posts.select! do |p| # Iterate through the posts array and keep the ones that include in their body or title an element from the query_array query_array.any? { |w| p[:body].downcase.include?(w) or p[:title].downcase.include?(w) } end @meta_description = 'iOS and Ruby development thoughts by Roland Leth.' all_posts.count > 0 ? erb :index, locals: { posts: all_posts, page: 1, total_pages: 1, gap: 2 } : search_not_found end Plus 10 lines of CSS, and that was that. I was (and still am) pondering on using Google's site: search. I feel its bolding of the search term(s) is a better user experience, but the pages aren't updated as fast as I post them, so I'll keep my custom search for now. ]]>]]> <form class="search" action="" method="get"> <input type="hidden" name="q" value="site:rolandleth.com"> <input type="text" class="search" name="q" size="15" maxlength="20" placeholder="Search..." 
accesskey="/"/> </form> Ok, so I went ahead and added a database and implemented some sort of sync between said database and my Dropbox folder. It's not perfect, but it works for me. I created a Posts model and configured the DataMapper: class Posts # Yes, I know the general consensus is to use singulars for Models, # but DataMapper creates/uses tables named 'application_[class_Name]' and 'application_posts makes more sense # Update, 30 May 2014: Shouldn't have cared about the table name as much as the class name. Point learned. include DataMapper::Resource property :id, Serial DataMapper::Property::String.length(255) DataMapper::Property::Text.length(999999) # Just because :) property :title, Text property :body, Text property :date, String property :time, String property :modified, String end configure :production do require 'newrelic_rpm' # Heroku provides the 'DATABASE_URL' so you don't have to type it manually DataMapper::setup(:default, ENV['DATABASE_URL']) DataMapper.auto_upgrade! end configure :development do DataMapper::setup(:default, "postgres://roland@localhost/roland") DataMapper.auto_upgrade! 
end On with the Dropbox sync, attached to a custom URL, for which I created a simple Hazel rule to trigger when I add a new file to my posts folder: get '/cmd.Dropbox.Sync' do session = DropboxSession.new(APP_KEY, APP_SECRET) session.set_access_token(AUTH_KEY, AUTH_SECRET) client = DropboxClient.new(session, ACCESS_TYPE) client_metadata = client.metadata('/Apps/Editorial/posts')['contents'] client_metadata.each do |file| matches = file['path'].match(/\/(apps)\/(editorial)\/(posts)\/(\d{4})-(\d{2})-(\d{2})-(\d{4})-([\w\s\.\}\{\[\]_&@$:"';!=\?\+\*\-\)\(]+)\.md$/) date = matches[4].to_s + '-' + matches[5].to_s + '-' + matches[6].to_s time = matches[7].to_s title = matches[8].to_s file_mtime = file['client_mtime'].to_s # If we were to assign the body variable outside the if-else statement # to avoid repeating code, all the files would be downloaded, _greatly_ increasing # the time for this bit of code to complete. This way, we download only the required files. post = Posts.first(:title => title) # If the posts exists if post # Check to see if it was modified if post.modified != file_mtime body = client.get_file(file['path']) post.update(title: title, body: body, date: date, time: time, modified: file_mtime) end # Otherwise, create a new record else body = client.get_file(file['path']) Posts.create(title: title, body: body, date: date, time: time, modified: file_mtime) end end all_posts = Posts.all # Check if any post was deleted (highly unlikely) all_posts.each do |post| delete = true client_metadata.each do |file| title = file['path'].match(/\/(apps)\/(editorial)\/(posts)\/(\d{4})-(\d{2})-(\d{2})-(\d{4})-([\w\s\.\}\{\[\]_&@$:"';!=\?\+\*\-\)\(]+)\.md$/)[8].to_s delete = false if title == post.title end post.destroy if delete end redirect '/', 302 end Then all I had left to do was to modify the code for parsing posts: # This returns an array of structures containing all the posts' data all_posts = repository(:default).adapter.select('SELECT * FROM application_posts') # 
Convert the structures to hashes all_posts.map! { |struc| struc.to_h} # Sort the array by the datetime field of our hashes all_posts.sort! { |a, b| a[:datetime] <=> b[:datetime]}.reverse! […] category = post.body.lines.first date_matches = post.date.match(/(\d{4})-(\d{2})-(\d{2})/) date = Date.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i).to_time.utc.to_i title = post.title content = _markdown(post.body.lines[2..-1].join) Then I removed all the md files from my project, since I won't be needing them anymore. That's about it: I can add new posts, modify old posts, delete posts and even add posts dated in the past. And all of this without having to push new commits, while mobile if desired and by writing md files. ==Awesome!== As usual, much easier than I expected before I started ... Stop thinking it's too hard! strikes again. Update, 28 Aug 2013: Added a little Ruby script to the Hazel rule, so I can keep a local count of the rule triggers. Why not merge the HTTP request in here? Because if the curl action doesn't complete, Hazel will not continue onto the Ruby action: no sync, no count. c = File.readlines('Syncs.txt').count - 1 date = Time.now.strftime('%Y-%m-%d %I:%M %p') body = c.to_s + "\n\n" + date.to_s + "\n" + File.readlines('Syncs.txt')[2..-1].join().to_s File.open('Syncs.txt', 'w') { |f| f.write(body)} I also have a table on the server's database where I also keep track of how many times the URL has been accessed; this way I can see how many times the URL has been accessed by other people feeling funny, heh.]]> I tried several text editors for the iPad so far and none really nailed it. Until now. After reading Viticci's review of Editorial, even knowing I won't make much use of it, I still went ahead and bought it. 
The fact that I had my iMac in service for a few days might have been a huge reason I was enjoying this, but I was enjoying it so much, that I actually went ahead and imported several workflows and created a few of my own and drafted a couple of blog posts for when I had my iMac back. I was kinda sad that I couldn't publish the posts right after writing them, since the blog is hosted on Heroku and they have a read-only filesystem (meaning i can't add new posts without pushing a git commit), but that's why I decided to give read-posts-from-Dropbox-instead-of-locally a try after all. The code is already there, but commented out because I ended up thinking it's not a great idea, since the flow when a user reaches the blog would be user -> server -> Dropbox -> server -> user, much slower than user -> server -> user, but until proven, it's all speculation. I might have to add a database somewhere for less redundant Dropbox communication, but I'm looking forward to it. If the extra fetch-from-Dropbox step proves to not slow things down too much, that means I will have the freedom to publish posts when mobile, granting me the opportunity to not only post from anywhere, but also to actually take a break from the computer as an excuse to write a blog post with Editorial on my iPad. Eventually outdoors.]]> Implementing this, per se, is no problem. But, kind of the same as recurring events, there's no proper way of handling this feature and I'll give a small example below. Say you have expenses laid out for the next 6 months: how should you enter the income? Manually calculating 6*monthly income? Or just for the next month? If the latter, subtracting all expenses from that doesn't make much sense, and the former isn't too reliable, since your income might vary from month to month. What about adding a new expense that's due in 8 months, but you entered the income for 6? Go and manually update the total? What if you forget? Etc... 
Sure, it would be easy to implement, but it's not user-friendly nor reliable. At least as I see it so far ... If you have any suggestions, please contact me; I'd be more than happy to add this, but, as I said in my previous post, first I want to make sure the solution is reliable, easy to use and intuitive; I don't want to implement a feature just for the sake of having it.]]> Update, 01 Nov 2014: Implemented a custom recurring system, explained here. Not perfect, but close enough to get my approval to be implemented.* There are two problems about this and I'll try to explain them the best I can. ==First==: Apple provides a way to implement recurring reminders, but I use it for the weekly/daily/hourly setting. Manually creating a separate system for recurring events is easy; the problem is that this method is not reliable. If a reminder is set to repeat, lets say hourly, a notification/alert will be presented every hour, without the need to open the app. In a custom-built system, the app would have to be opened after every notification/alert, for the reminder to be rescheduled. There is a semi-automatic solution: implement a "repeat every: value" field and a button that would reschedule the event. ==And the second, bigger problem==: how should the total be calculated? By adding the values of visible events only? Or by adding all the values, including the ones of repeating events for the upcoming year? Should it be the end of the current year, or a year starting from now? But then, why a year and not only 6 months? The questions can go on and on, and this is actually why I chose not to implement recurring events. 
If you have any suggestions, please contact me, I'd be more than happy to add recurring events, but first I want to make sure the solution is reliable, easy to use and intuitive; I don't want to implement a feature just for the sake of having it.]]> I.]]> I recently realized that if I were to have two posts on the same day, they would be ordered alphabetically, not by time, because the filename has no time reference. I wouldn't be able to use file creation time either, because the creation time of all files on the server is the same: the git push time. I did have a time references inside files, but to actually check what files are on the same date, then order just these by said time, while keeping to order the rest, independently, would have been a pain to implement, at least to the simple, albeit ugly, solution: add a time component to filenames. The modifications were minimal: # The filenames were the hardest part, since I had 27 of them # YYYY-MM-DD-HHMM-File name.md # The regex post.match(/\/(\d{4})-(\d{2})-(\d{2})-(\d{4})-([\w\s\.\}\{\[\]_&@$:"';!=\?\+\*\-\)\(]+)\.md$/) # The title to equal the 5th match instead of 4th title = match[5] # The content: start reading a line sooner, since the line with the time is gone _markdown(File.readlines(post)[2..-1].join()) # The time variable for the feed to read from the filename time = Date._strptime("#{matches[4]} EEST","%H%M %Z") I'm pleased to announce a brand new, beautiful and minimalist financing app, Expenses Planner: keeps track and reminds you of your upcoming expenses and due payments. I just submitted it for review, meaning it's coming to an App Store near you in two weeks time. ==Hooray!==]]> I finally got around and implemented the RSS feed. 
Wasn't as hard as I expected; piss easy, actually (check the end for the updated version): get '/feed' do posts = Dir['posts/*.md'].sort_by!{ |m| m.downcase }.reverse rss = RSS::Maker.make('2.0') do |maker| maker.channel.icon = "/public/favicon.ico" maker.channel.logo = "/public/favicon.ico" maker.channel.title = 'Roland Leth' maker.channel.description = 'Roland Leth' maker.channel.link = "/" maker.channel.language = 'en' hours = [0, 1, 2, 3, 20, 21, 22, 23] maker.items.do_sort = false posts.each do |post| matches = post.match(/\/(\d{4})-(\d{2})-(\d{2})-([\w\s\.\}\{\[\]:"';!=\?\+\*\-\)\(]+)\.md$/) i = maker.items.new_item i.title = matches[4] time_string = File.readlines(post)[1] # in case I forget to fill the time, just create a random hour between 8 PM and 3 AM, that's when I work most of the time if time_string.length == 8 or time_string.length == 9 time = Date._strptime("#{time_string} EEST","%H:%M %p %Z") time[:leftover] = nil else min = rand(0..59) time = Date._strptime("#{hours.sample}:#{min} EEST","%H:%M %Z") end # titles are written 'Like this', links need to be 'Like-this' i.link = "{matches[4].gsub("\s", "-")}".gsub(";", "") content = _markdown(File.readlines(post)[3..-1].join()) i.description = content i.date = DateTime.new(matches[1].to_i, matches[2].to_i, matches[3].to_i, time[:hour], time[:min], 0, time[:zone]).to_time end end rss.to_s end But now I had another problem to fix: RSS Readers do not interpret my custom CSS, meaning the hack I did with the underline to show up as italic doesn't work with maker. Well, I reached GitHub while searching for a solution and it turned out that Sam posted this 4 days ago. TL;DR: He hacked Redcarpet to parse ==text== as <mark>text</mark> and the guys merged his changes. How fortunate :) ]]>]]> get '/feed' do posts = repository(:default).adapter.select('SELECT * FROM application_posts') posts.map! { |struc| struc.to_h } posts.sort! { |a, b| a[:datetime] <=> b[:datetime] }.reverse! 
rss ||= RSS::Maker.make('atom') do |maker| maker.channel.icon = '/public/favicon.ico' maker.channel.logo = '/public/favicon.ico' maker.channel.id = '' maker.channel.link = '' maker.channel.title = 'Roland Leth' maker.channel.description = 'Roland Leth' maker.channel.author = 'Roland Leth' maker.channel.language = 'en' maker.channel.rights = "© #{Time.now.year} Roland Leth" maker.channel.subtitle = 'iOS and Ruby development thoughts by Roland Leth.' maker.items.do_sort = false posts.each do |post| i = maker.items.new_item i.title = post[:title] date_matches = post[:datetime].match(/(\d{4})-(\d{2})-(\d{2})-(\d{4})/) time = Date._strptime("#{date_matches[4]} EEST", '%H%M %Z') i.link = "{post[:link]}" i.content.content = _markdown_for_feed(post[:body].lines[2..-1].join()) i.content.type = 'html' i.updated = DateTime.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i, time[:hour], time[:min], 0, time[:zone]).to_time i.published = DateTime.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i, time[:hour], time[:min], 0, time[:zone]).to_time # The RSS was last updated when the last post was posted (which is first in the array) maker.channel.updated ||= DateTime.new(date_matches[1].to_i, date_matches[2].to_i, date_matches[3].to_i, time[:hour], time[:min], 0, time[:zone]).to_time end end rss.link.rel = 'self' rss.link.type = 'application/atom+xml' rss.entries.each do |entry| entry.content.lang = 'en' entry.title.type = 'html' end rss.to_s end When I moved to a git-based blog, I knew I had no way to edit published posts that easily, like I had in WordPress. And, boy, did I use to edit and re-edit a post after I published it. Sure, I can commit over and over to edit stuff, but that's not great practice at all. But then it struck me: ==this is actually a good thing==. I will have to focus better and pay more attention, resulting in small, but steady improvement over time. 
Last night, for example, I did a lot of commits just to edit some silly mistakes and silly typos; this is something I will force myself not to repeat. Something else that struck me? ==Writing is hard!== It's not my goal, but I'd like to get better at it, because better organized thoughts mean a better organized mind, benefiting other domains too. On a side note: I added underline: true to the markdown renderer, I changed the css for underlines to u { font-style: italic; text-decoration: none; } and now _text_ is displayed as italic. Why? Well, because sometimes I'd like to emphasis something, but not ==this bad==: a quote, without using a quote block, for example. Update, Jun 26 2013: The above hack was replaced by using a newer version of Redcarpet that can parse ==text== as <mark>text</mark>. Nonetheless, it's still a decent hack when needed. Today I installed their standard Heroku add-on and, after only half a day of usage, I'm blown away. Here's what you get for free: The amount of information it can gather is impressive, and it can be installed on any platform, even mobile apps. I think it's worth checking it out. Here is a pretty detailed review by net.tutsplus. Oh, did I mention their support is awesome? I received replies in under an hour, four times in a row; for quite unimportant stuff too. On a side note, I finally configured syntax highlighting; awesome. Stole and adapted the pygmentation file from Mattt's NSHipster. Finally took the time to recreate the blog, the WordPress theme i was using was highly customized, but wasn't really fitting my needs anymore. And, again, since I'm learning web development, why not redesign it completely? The layout is basically the same, but I stole Sam's pagination and Matt's header and tweaked them to fit my preference. The site was created with Sinatra, and it's hosted on Heroku. I'm still using Markdown and Mou to write, but instead of using a database for the posts, I'm using .md files. 
This way I have them on the server git, on my Mac (and on Time Machine, implicitly) and synced with Dropbox, since I will be using Byword for mobile editing. I also created a simple Alfred Workflow to commit and push after I'm done creating a new post: cd /path/to/my/git git add . git commit -am "New post" git push heroku master The only thing that I'll be missing is publishing from mobile, since I don't have a way to git push when I'm not at my Mac, nor do I know how to read straight from Dropbox; but I'll get to that too, eventually. I'll post the source on GitHub tomorrow, I just need to properly comment it. Wow, the new HIG is on a totally new level regarding the layout of information; it will be a pleasure to read. I have to admit I didn't really read the old one, but I'm ==definitely== going to read this one.]]> Landscape for non-iPads is kinda disappointing, but that's the best I could hack into the current WordPress theme. Maybe I will create an html theme with Rails backend and finally ditch WordPress soon enough.]]> Since. What. This whole concept is awesome. I quote: Red Pen lets you upload your design, share a short URL, and get live, annotated feedback super-fast. This is how it works: you go to RedPen.io and upload your image. You'll receive a personalized URL, in short format, like. You're then invited to fill your name and e-mail address, so the system remembers you: From second visit onwards the bar looks like this: You can then start sending it to people you'd like an opinion from. 
To leave a comment you just tap on the image, preferably on the exact spot/area you want to comment on, because a red dot appears where you click: Hovering the image reveals all entry points (dots) for existing comments (or threads if you prefer this naming): If you keep your link open somewhere, you'll even receive live notifications every time a new comment is added: Hovering a dot opens a popup with all comments in that spot, allowing you to reply to that particular thread: Go ahead and give it a try, it's pretty awesome, if you ask me.]]> This version brings: • Badge count. You can now set the badge on the app icon for the number of events, as follows: tomorrow, within two weeks, within a month, all and none. • Finally fixed all the bugs preventing the keyboard to hide on the iPad. • Fixed a bug I introduced in last version where the keyboard would sometimes not hide on the iPhone. • Changed the name in the App Store. • Minor overall tweaks. Moved my blog here. Closed blog.rolandleth.com. Moved my work to rolandleth.com/work. Result: more organized, easier to maintain, but, most importantly, easier for everyone to interact with everything. Awesome app for previewing app icons directly on your device. Easy, seamless integration with your Mac: drag and drop an image to the menubar icon, and, bam, it's on the device. The Mac companion app is free and can be found on the Mac App Store here. The only caveat is that you can only preview one icon at a time. I ==really== hope they will add the possibility to add several icons (grid or even scroll based) so you can be able to compare several iterations at once. Otherwise, amazing app. You can find the app here; via BeautifulPixels.]]> I know this isn't new anymore, but I just wanted to share (and bookmark); it's worth it. Finally submitted Carminder. Can't wait for it to go live!]]> Wow. This has to be one of the prettiest free templates to showcase your iPhone app. From Tympanus.net. 
==Thanks, Mary Lou!==]]> I need to bookmark this. And so should you. ]]>]]> There's this framework called Search Kit, which despite being insanely powerful and useful for finding information, is something that almost no one has ever heard of. Damn, this is pretty nice. Add particle effects to UIKit, just like in Cocos2D: create them with UIEffectDesigner, then add them to your project with UIEffectDesignerView. Works for both iOS and Mac. Still in beta, but I haven't found any glitches so far. It's as easy as: ]]>]]> #import "UIEffectDesignerView.h" ... UIEffectDesignerView* effectView = [UIEffectDesignerView effectWithFile:@"name_of_your_choice.ped"]; [self.view addSubview:effectView]; Whoa, indeed. Whoa. Timepiece, a symbol font that displays numeric time with analog face: (thx @fchimero). Via @cameronmoll.]]> Markdown to rich text in Mail.app with 1 click by @tobiasoleary: ckyp.us/dR1j; via Brett Terpstra. This is going to be ==awesome==.]]> So what if you didn't pay attention in Geometry class--this is the real world, and in the real world, you have CGGeometry.h Know it well, and you'll be on your way to discovering great new user interfaces in your apps. Do good enough of a job with that, and you may run into the greatest arithmetic problem of all: adding up all of the money you'll make with your awesome new app. Mathematical! Here is the whole writeup; totally worth a read.]]> Today.]]> Really nice utility class to modify the frame of an UIView. Semantically and without the multitude of CGRectMakes, using more readable stuff like: [view.po_frameBuilder setWidth:100.0f height:40.0f] You can find the class on his GitHub and read about it on his blog. ==Thanks, Sebastian!==]]> Making great progress on my current project, feels awesome! I will post some teaser screenshots soon enough. Today was a fine day for science!]]> The most obvious thing is that I started writing this blog; that's an addition to what I had so far. 
What did change are my app pages: instead of having them under rolandleth.com/appname, I moved them to appname.rolandleth.com, so I can tweak them separately and build landing pages for them. I used the free WordPress theme created by the great guys at Apptamin, theme found on GitHub. I also tweaked it to be used for apps/games in landscape, I moved some functions around that were creating a weird " " text, which in turn was creating a weird blank space and the result is on my GitHub; to be used with screenshots that have their own iPhone/iPod frames, not the included ones. This means in your WordPress Dashboard, in Appearance, under Apptamin Theme Options you need to set the Front Device and the Back Device to ==None== under AppStage and for all the features you want to add under App Features. My tweaked version works with portrait as well, but requires a bit more tweaking with positioning. Shouldn't be hard in case you want to use screenshots with frame included.]]> Welcome to my blog. I thought it's about time I should start writing, but to be totally honest, that time has passed for quite some time now, so I should rephrase that to: ==I thought it's too damn long since I should have started writing==. I will be writing mostly about iOS development. I'll post snippets, walkthroughs, tips and tricks, stuff that I struggled with and links to interesting stuff I find around the web. The blog is running on Wordpress and it's written using Markdown, with the help of Mou and sometimes Poster. ==Thanks for visiting!== Update, Jun 13, 2014: Erato by 9muses is my new favorite markdown editor - clean, beautiful, powerful; inline preview included. For mobile I've been using Editorial for quite some time now; hands-down, the best editor for mobile. Update, Jun 15, 2013: Completely ditched WordPress and recreated the blog with Sinatra. Read more about it here. Also, the live preview and, most of all, the custom-css for Mou's preview beats any Markdown editor. 
Update, Mar 22, 2013: Today I downloaded Day One. Since it has great Markdown support, I think I will be replacing Mou to write my posts on the Mac. I'm just sad it has no copy as HTML. Don't really need it, but would have been nice. For mobile I'll keep on using Poster, don't think it can get better than this. Update, Apr 30, 2018: Haven't updated this post in a while now... Several editors came and went (either discontinued or something better emerged), but I'm now using Caret on Mac and Pretext on iOS. I'm not using Dropbox anymore — although the sync still works — so I don't really need a mobile editor that much.]]>
https://rolandleth.com/tech/feed
CC-MAIN-2020-16
refinedweb
40,690
56.76
Opened 14 years ago Closed 9 years ago #240 closed enhancement (fixed) import all files within a directory Description This was discussed for shapefile vectors at: - I cannot just point the program to a directory of shapefiles and tell it to import all. QGIS, gvSIG and ArcGIS can do this. We could easily add v.in.ogr.all or v.external.all, I suppose two or three lines of shell script :) Could be stored in the Addons wiki. This should be enabled for vector and raster files. Examples: - Import all landcover vector files from a folder - Import all SRTM ASC/TIFF files from a folder a nice option would be if one could run ogr2ogr/v.patch or gdal_merge/r.patch while importing to generate a mosaic of all the tiles within the folder with the same step. This should also be implemented for the GUI. Change History (4) follow-up: 2 comment:1 by , 14 years ago comment:2 by , 14 years ago comment:3 by , 10 years ago It's not that hard. Adding a small python script could close this enhancement comment:4 by , 9 years ago Possible from the GUI [1]. For advanced users it is easy to write a script for that or to use Graphical Modeler. Closing this ticket. (1) An option may be to implement wildcard support in r.in.gdal|v.in.ogr. Possibly a GDAL/OGR function could be called to merge before handing the data block over to GRASS (within the modules). Markus
https://trac.osgeo.org/grass/ticket/240
CC-MAIN-2022-33
refinedweb
249
73.88
I’m trying to replace cells in my Excel worksheet that contain a hyphen “-“ with the average value between the above lying cell and the below lying cell. I’ve been trying to do this by looping through each row in column 3 import math from openpyxl import load_workbook import openpyxl d_filename="Snow.xlsx" wb = load_workbook(d_filename) sheet_ranges=wb["PIT 1"]' def interpolatrion_of_empty_cell(): for i in range(7,31): if i =="-": sheet_ranges.cell(row = i, column = 3).value = mean(i-1,i+1) else: sheet_ranges.cell(row = i, column = 3).value wb.save(filename = d_filename) The reason values are not replaced is that you use i to check if it's equal to -. i is an index, not the value of a cell. Also to calculate the mean, you are using indices, not the values of the top and bottom cells. So you could solve this in the following way: def interpolatrion_of_empty_cell(): for i in range(7,31): cell_value = sheet_ranges.cell(row=i, column=3).value if cell_value == "-": top_value = sheet_ranges.cell(row=i+1, column=3).value bottom_value = sheet_ranges.cell(row=i - 1, column=3).value sheet_ranges.cell(row=i, column=3).value = (float(top_value) + float(bottom_value))/2 Note that this may require tweaking, as it does not account for cases where top and bottom rows are -, not numbers, or just empty cells.
https://codedump.io/share/TvKpmvH79sZO/1/replace-missing-values-in-excel-worksheet-using-openpyxl-module
CC-MAIN-2016-50
refinedweb
219
50.53
This article will teach you real time file operations which are needed frequently by programmers e.g. counting words in a file, etc. Here we are going to learn : - Count number of words in a given file - Determining general information on file - Creating and deleting files First we assume that we have an input file which has following contents This is Anurag Jain My username is csanuragjain I like writing Article Mrbool is great place to learn. Now we will discuss to count number of words in file. Listing 1: Count number of words import java.io.BufferedReader; import java.io.FileReader; public class CountWord { public static void main(String args[]) { CountWord s=new CountWord(); try { s.start(); } catch(Exception e) { System.out.print("Some problem occured"); } } public void start() throws Exception { FileReader fr=null; BufferedReader br=null; try { fr=new FileReader("out.txt"); br=new BufferedReader(fr); String line=br.readLine(); int totword=0; while(line!=null) { String a[]=line.split(" "); for(int i=0;i<a.length;i++) { if(a[i].length()>0) totword+=1; } line=br.readLine(); } System.out.print("Total number of words"+totword); br.close(); } catch(Exception e) { e.printStackTrace(); System.out.println("error"+e.getMessage()); } finally { br.close(); fr.close(); } } } Here: - We include BufferedReader and FileReader so that we can read the input file - We make a new class and then in the main method of this new class we call the start method of this class - Now in the start method we define objects of BufferedReader and FileReader and pass the file to be read as an argument - We read the first line of the input file and store that in variable called line.Also we define another variable totwords which will count the total number of words in the input file - Now we make a loop which reads the input file fully - Inside the loop we split the line read from the input file using the split method.We split the line when a space is found.We check whether the split text is 
having some length so that we make sure its not counting a space again.This will give us a set of all words. - Similarly we move to next line and perform same operation similarly - After that we display total number of words and display on console - We put code inside try-catch so that exception are handled with care Now we will learn how to obtain basic information of a file on your computer Listing 2: Retrieve Basic information of file import java.io.File; public class Fileinfo { public static void main(String args[]) { Fileinfo s=new Fileinfo(); s.start(); } public void start() { try { File f=new File("source.pdf"); System.out.println("Path of file is :"+f.getAbsolutePath()); System.out.println("Name of file is :"+f.getName()); System.out.println("Size of file is :"+f.length()/(1024)+"KB"); } catch(Exception e) { e.printStackTrace(); System.out.println("error"+e.getMessage()); } } } Here: - We import file so that we may perform operation on file - We make a class and also a main method which calls the start method of this class - In start method we make a file object which point to our file - Now we can use the several attribute of File to accomplish our task - We obtain the path of the file by making use of getAbsolutePath method.This will give the location of file on the computer - We determine the name of file by using the getName method. 
- We determine the size of file using the length method and we divide it by 1024 so that it gets converted to Kb.Further dividing by 1024 give result in MB - This is kept inside try-catch so that exception are caught properly Now we learn how to enhance the previous program Listing 3: Creating, deleting and determining import java.io.File; public class Fileinfo { public static void main(String args[]) { Fileinfo s=new Fileinfo(); s.start(); } public void start() { try { File f=new File("source.pdf"); f.createNewFile(); f.deleteOnExit(); File f2=new File("F:\\Wallpaper\\new"); f2.mkdirs(); if(f2.isDirectory()) System.out.println("Directory"); else System.out.println("File"); } catch(Exception e) { e.printStackTrace(); System.out.println("error"+e.getMessage()); } } } Here, - Like we did earlier we make class, main method and a start method - For first case we make a file object which is pointing to a file which may or may not exist - Now to create this file we use the method createNewFile() which create the file if it does not exist. It return Boolean value telling if the operation was success or not - We can delete this file when we exit the program by using deleteonexit function - Now we take another case where we made a file point to a directory - Now we may use mkdirs to make the given directory in case it does not exist. - In case we don’t know whether the argument passed to File object is directory or File we may use the isDirectory or isFile method. - This is placed under try catch for proper exception handling This is all for today’s article. Hope you liked it. See you next time with some more interesting articles
http://mrbool.com/how-to-count-words-and-get-basic-info-from-a-file-with-java/25520
CC-MAIN-2021-21
refinedweb
868
62.68
Twice a month, we revisit some of our readers’ favorite posts from throughout the history of Activetuts+. This tutorial was first published in September, 2009. Hi, once again, my name is André and in this tutorial I'll demonstrate how to create custom filters with the Pixel Bender Toolkit, then use them with Flash CS4 to output .pbj files. * This feature works only in Flash Player 10. Step 1: The Pixel Bender Toolkit The Pixel Bender Toolkit comes with the Adobe Master Collection CS4 pack, or you can download it at. Step 2: The Basics Before creating any filter, we must understand the basic functions and types of the language. It's different to Flash, and much simpler. The input keyword: This is the input image, the image that will be read and worked on. We can have up to 2 input images in the code, working with one image will create a filter, and working with 2 images will create a blend mode. The input is always type of "image4", which is an image in RGBA mode (Red, Green, Blue and Alpha). The output keyword: This is the output pixel, as opposed to the input. This will not output the image, this will just output the pixel read in RGBA. This is type "pixel4" (and not image4 like the input). The parameter keyword: Parameter keyword will work like a setter function. With the parameter the values of the filter can be changed when in use. The parameter must be followed by the type and name, and can also have minimum value, maximum value and default value. Example: parameter int dimension <minValue:1; maxValue:10; defaultValue:1;>; or parameter float myfloat <minValue:1.0; maxValue: 2.0; defaultValue: 1.0>. Also the parameter can be typed float2, float3, float3, int1, int2... example: parameter float2 test <minValue:float2(1.0,2.0);maxValue:float2(5);defaultValue:float2(1.0,2.0);>; Also we have the types float, float2, float3, float4, int, int2, int3, int4 and many others which we will not use here. 
Also, some types don't work with Flash Player 10, so I won't get into them right now. I will, however, discuss a little bit about the types I've mentioned here and how they work. Type float, float2, float3 and float4: when you create a float4 type for example, you are creating an array of 4 float values. In Pixelbender the float values are define by dot, but float() also works like a function to convert other number values in float. For example "float4 test=float4(1,2,3,4);". Here we have an object with 4 values (type float) in "test" variable. You can also create a float4 object from one value, for example: "float4 test=float4(3);". Here we have an object with 4 values (RGBA) and all values are the same (3.0 float). When you create a float value, you can also create it using a dot like "float test=1.0;". If you try to define it like "float test=1;" it will throw an error, because numbers without a dot in pixelbender work like int values. So float are always defined by dot. Even using "float()" to create the float value will return a number with a dot. Lastly, to access float values with more than one value you can use syntax like an array access " variable[0] or variable[1] or variable[2] ... ". Type int, int2, int3 and in4 are the same thing as float types, but don't have dots. You can also convert number values using "int" like functions. evaluatePixel(): this function runs over all the image, pixel by pixel, and then returns the output type pixel4. In custom filters for Flash we always use this function. outCoord(): this function returns the current coordinate of the pixel being read by the evaluatePixel function. It returns a value type float2, x and y values, and can be accessed by [] like array or .x and .y like object. For example: var out=outCoord(); //out.x is the same of out[0] and out.y is the same of out[1]. 
sampleNearest(source,pixelCoordinate): this function returns the float4 value of the pixel from the source image (image4) at the coordinations "pixelCoordinate". Normally we use the "outCoord" function here. An observation must be made; when using float values and you want to add/subtract/multiply or divide the values with other float value of the same length, you can use them like this example: float4 test1=float4(3.0,2.0,2.0,3.0); float4 test2=float4(1.0,2.0,2.0,1.0); float4 result=test1-test2; The result will be a variable type float4 with values 2.0, 0.0, 0.0 and 2.0. Also, you could use: float4 test1=float4(3.0,2.0,2.0,3.0); float4 test2=float4(1.0,2.0,2.0,1.0); float4 result=test1; result[0]=test1[0]-test2[0]; result[2]-=0.5; I think this is enough to understand the structure of Pixel Bender code, let's move onto the next step, after I've mentioned just one more thing: Before testing any filter, it's important to load at least one image (file > load image 1"). To test the filter you can go to build > run, if the filter has any parameters, on the right side of the application you'll see sliders to change the values. They change at runtime and have a live preview, since each time you press run again. Step 3: Create a New Pixel Bender Filter This filter comes with Pixel Bender Toolkit, but is one of the simpler filters to explain. For more about the Pixel Bender language reference just hit F1 button in the program, and the help in .pdf will open. Once the program is open, create a new Kernel Filter (file > new kernel filter) the program will create a default structure for the filter: <languageVersion : 1.0;> kernel NewFilter < namespace : "Your Namespace"; vendor : "Your Vendor"; version : 1; description : "your description"; > { input image4 src; output pixel4 dst; void evaluatePixel() { dst = sampleNearest(src,outCoord()); } } In kernel NewFilter, you change the name NewFilter for the name of your filter. 
Namespace, vendor, version and description I don't need to explain, just your strings like author, version (int) and the description. The input image will be the image loaded by the filter and the output will be the pixel generated by the evaluatePixel function. The output will be a pixel4 value generated by the evaluatePixel function, which runs pixel by pixel of the input image as I've explained. At the line " dst = sampleNearest(src,outCoord());" we are getting the value of the current pixel, and the coordinate outCoord() from image src (the input image), so we can modify the values of the rgba value of the dst. For example, if we want to invert the colours of the input image, we could do the following: dst = sampleNearest(src,outCoord()); dst.rgb=float3(1)-dst.rgb; What are we doing here? We are stating that the rgb value of this pixel is the array of float3 value less the original value of rgb, so the color will be inverted. You can use the dst.rgb instead of using dst[0], dst[1]... and the order after the dot can be any order, it will read each letter as the value of the color. For example, you can use dst.gbr=float3(1)-dst.gbr. Another thing you can try is to change the colours of the image. For example by using the code below (inside the evaluatePixel function): dst = sampleNearest(src,outCoord()); dst.rgb=dst.brg; This code will output an oddly coloured image. Step 4: Testing a Prepared Code From Adobe Let's test a filter from Adobe. The pixelate filter is great for testing, so go to file > open; in the folder where Pixel Bender is installed there are some filters. Let's choose the pixelate filter. Once it's open you can hit the "run" button to test the filter. If you want to export, go to file > Export kernel filter for flash player. This will export the filter to use with Flash, you can load the filter with the URLLoader or embed with the Embed tag from Flex SDK. 
In this tutorial I will show how to work with the embedded file, since the filter weighs only about 4kb to 15kb (it's very lightweight). The output extension is a .pbj file. Step 5: Create the Folder Structure If you have a classpath for Flash, use your classpath, if you dont have one and want to create one, open my previous tutorial and follow Step 1. If you don't want a classpath, use the same folder of your .fla document. Let's assume the classpath for the tutorial. In your classpath create the folder "pixelbender". Then inside the "pixelbender" folder, inside your classpath create the folder "pbj". Copy the .pbj file (example: pixelate.pbj) to this pbj folder you've created. Step 6: Creating the Class for the Pixelate Filter Open Flash CS4, or Flex with updated SDK for FP10. If you're using Flash, it's important to setup the Flex SDK for Flash. If you don't know how to do this, hit "ctrl+u" to open the preferences of Flash, then select the "actionscripts" at category, then "Actionsctipt 3.0 settings". In the window "Actionscript 3.0 advanced settings" click the "+" button of the library path and add the following: $(FlexSDK)/frameworks/libs/flex.swc. Click the OK button. Now create a new .as file, and start coding the following: First we set the package and import the necessary classes. package pixelbender{ import flash.display.Shader; import flash.filters.ShaderFilter; import flash.utils.ByteArray; Create the public class PixelateFilter extending the ShaderFilter. The ShaderFilter can be applied as a normal filter in the filter array of any DisplayObject. public class PixelateFilter extends ShaderFilter{ Embed the pixelate.pbj file in the folder pbj (we're assuming that we'll save the .as in the pixelate folder of our classpath). The embed tag is a Flex tag which embeds files in a swf instead of loading them. There are lots of types that you can embed, like .flv, .jpg and others, and as mimeType application/octet-stream the file will be embedded as ByteArray. 
The embed tag creates a class for the embedded file, here I'm using a class named "Filter". [Embed(source="pbj/pixelate.pbj",mimeType="application/octet-stream")] private var Filter:Class; In the constructor, let's create an instance of our embedded file as ByteArray. The ByteArray is the Shader constructor's parameter, so we'll also create the shader instance, setting the filter to "ByteArray" as the parameter of the constructor. Since we're extending the ShaderFilter, we don't need to create an instance of the ShaderFilter. This class is already ShaderFilter extended, so all we need to do is set the shader parameter of our ShaderFilter class as the shader instance. public function PixelateFilter():void { var filter:ByteArray=new Filter() as ByteArray; //The embedded file as ByteArray var shader:Shader=new Shader(filter); //The instance of Shader this.shader=shader; //setting the parameter shader of our class } Now we create a new parameter for our class, the parameter "dimension". This parameter will affect the "parameter int dimension" created in the pixelbender. The setter function will alter the value, and the getter function will just get the current value. The shader data values can be accessed by "instance.data.value", the values are arrays. If we had a parameter "parameter int2 position;" in the filter for example, we would access it by "instance.data.position[0]" and "instance.data.position[1]" respectively. public function set dimension(value:Number):void { shader.data.dimension.value[0]=value; } public function get dimension():Number { return shader.data.dimension.value[0]; } After all that, just close the package and the class. } } Now the class for this filter is created, save the .as file with the name "PixelateFilter.as" (the same name as the class) in the pixelbender folder inside your classpath (same name as your package, and where you've also created the pbj folder). 
Step 7: Test the New Filter First step, create a new .fla AS3 document, save it anywhere you want, for example c:/mycustomfilter. Define a class for this .fla document. Open the properties panel of the .fla document from the window > properties, in the box "Class" type "Main", and create a new actionscript file. Copy any image to the same folder of the .fla document, for example I've used one of the sample images from the Pixel Bender examples; YellowFlowers.png, which can be found with the source files. If you don't have the TweenLite class yet, please download it at, and unpack the contents of gs folder inside the gs folder in your classpath. Create a new .as document. Include the necessary classes to our classpath: package{ import flash.display.Sprite; import flash.display.Bitmap; import pixelbender.PixelateFilter; //Our custom filter import gs.TweenLite; //the best Tweening class Create the Main class extending the Sprite class: public class Main extends Sprite{ Embed the image for testing, the mimeType is "image/png", so we're embedding as image not ByteArray. Name its class "Img". Additionally, we type a variable named "filter" of the type PixelateFilter, so we can use it in any function later on. [Embed(source="YellowFlowers.png",mimeType="image/png")] private var Img:Class; private var filter:PixelateFilter; In the constructor, we start creating our image, which will be affected by the filter, then add the image child to the stage. Then create instance of PixelateFilter. We've created the variable before, so we don't need to type again. Set the filter dimension to 100, so we can see the effect better, also add the filter to the filterlist of our Main class. Then using the TweenLite class we animate the filter parameter. The dimension parameter will be animated from 100 to 1. While the animation is being updated, the function "tweenLiteUpdate" is executed, and when it's finished animating the "newTween" function will be executed. 
public function Main():void{ var image:Bitmap=new Img() as Bitmap; addChild(image); filter=new PixelateFilter(); filter.dimension=100; this.filters=[filter]; TweenLite.to(filter,3,{dimension:1,onUpdate:tweenLiteUpdate,onComplete:newTween}); } While TweenLite is being updated, the tweenLiteUpdate is executed and updates the filter of our Main class. Without this method we wouldn't see the TweenLite updating the filter. private function tweenLiteUpdate():void{ this.filters=[filter]; } When the first Tweening animation completes, it will run the newTween function. The first line of this function will check if the filter dimension value is 1. If so, it will set the dim variable to 100, else it'll set the variable to 1. This is the same with if and else, or switch. The second line will start the Tweening animation of the filter again. private function newTween():void{ var dim:Number=(filter.dimension==1)?100:1; TweenLite.to(filter,3,{dimension:dim,onUpdate:tweenLiteUpdate,onComplete:newTween}); } Now just close the package and the class with double "}". } } Save your file as "Main.as" in the same folder of your .fla file, and if all files and folder are OK, you can test your file. The animation will start pixelated, changing to the normal image, and will loop continuously. Conclusion I hope you liked this, and I hope it will be very useful. In Adobe Exchange there are a lot of other filters you can download, some of them are free or open source. I've also put some other .pbj and classes with the source for studying. For example, SpherizeFilter.as:, animates by the position of the mouse. Envato Tuts+ tutorials are translated into other languages by our community members—you can be involved too!Translate this post
https://code.tutsplus.com/tutorials/create-custom-filters-using-the-pixel-bender-toolkit--active-1772
CC-MAIN-2019-35
refinedweb
2,673
57.67
pyperclip 1.3 dont. Pyperclip runs on both Python 2 and Python 3. Usage is simple: import pyperclip pyperclip.copy('The text to be copied to the clipboard.') spam = pyperclip.paste() - Change Log: - 1.2 Use the platform module to help determine OS. - 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError - Downloads (All Versions): - 10 downloads in the last day - 106 downloads in the last week - 436 downloads in the last month - Author: Al Sweigart - Maintainer: Alexander Cobleigh - Download URL: - License: LICENSE.txt - Platform: Windows,OSX,Linux - Categories - Package Index Owner: cblgh - DOAP record: pyperclip-1.3.xml
https://pypi.python.org/pypi/pyperclip
CC-MAIN-2014-15
refinedweb
111
61.22
You can use application Binary Interface (ABI) Monitoring tooling, available in Android 11 and higher, to stabilize the in-kernel ABI of Android kernels. The tooling collects and compares ABI representations from existing kernel binaries ( vmlinux+ modules). These ABI representations are the .xml files and the symbol lists. The interface on which the representation gives a view is called the Kernel Module Interfaces (KMIs). You can use the tooling to track and mitigate changes to the KMI. The ABI monitoring tooling is developed in AOSP and uses libabigail to generate and compare representations. This page describes the tooling, the process of collecting and analyzing ABI representations, and the usage of such representations to provide stability to the in-kernel ABI. This page also provides information for contributing changes to the Android kernels. This directory contains the specific tools for the ABI analysis. Use it with the build scripts provided by build_abi.sh.) Process Analyzing the kernel's ABI takes multiple steps, most of which can be automated: - Acquire the toolchain, build scripts, and kernel sources through repo. - Provide any prerequisites (such as the libabigaillibrary and collection of tools). - Build the kernel and its ABI representation. - Analyze ABI differences between the build and a reference. - Update the ABI representation (if required). - Work with symbol lists. The following instructions work for any kernel that you can build using a supported toolchain (such as the prebuilt Clang toolchain). repo manifests are available for all Android common kernel branches and for several device-specific kernels, they ensure that the correct toolchain is used when you build a kernel distribution for analysis. Using the ABI Monitoring tooling 1. Acquire the toolchain, build scripts, and kernel sources through repo You can acquire the toolchain, build scripts (these scripts), and kernel sources with repo. 
For detailed documentation, refer to the corresponding information for building Android kernels. To illustrate the process, the following steps use common-android12-5.10, an Android kernel branch, which is the latest released GKI kernel at the time of this writing. To obtain this branch through repo, execute the following: repo init -u -b common-android12-5.10 repo sync 2. Provide prerequisites The ABI tooling uses libabigail, a library and collection of tools, to analyze binaries. A suitable set of prebuilt binaries comes with the kernel-build-tools and is automatically used with build_abi.sh. To utilize the lower-level tooling (such as dump_abi), add the kernel-build- tools to the PATH. 3. Build the kernel and its ABI representation At this point you're ready to build a kernel with the correct toolchain and to extract an ABI representation from its binaries ( vmlinux + modules). Similar to the usual Android kernel build process (using build.sh), this step requires running build_abi.sh. BUILD_CONFIG=common/build.config.gki.aarch64 build/build_abi.sh That builds the kernel and extracts the ABI representation into the out_abi subdirectory. In this case out/android12-5.10/dist/abi.xml is a symbolic link to out_abi/android12-5.10/dist/abi-<id>.xml. < id> is computed by executing git describe against the kernel source tree. 4. Analyze the ABI differences between the build and a reference representation build_abi.sh analyzes and reports any ABI differences when a reference is provided through the environment variable ABI_DEFINITION. ABI_DEFINITION must point to a reference file relative to the kernel source tree, and can be specified on the command line or, more commonly, as a value in build.config. 
The following provides an example: BUILD_CONFIG=common/build.config.gki.aarch64 build/build_abi.sh In the command above, build.config.gki.aarch64 defines the reference file (as ABI_DEFINITION=android/abi_gki_aarch64.xml), and diff_abi calls abidiff to compare the freshly generated ABI representation against the reference file. build_abi.sh prints the location of the report and emits a short report for any ABI breakage. If breakages are detected, build_abi.sh terminates and returns a nonzero exit code. 5. Update the ABI representation (if required) To update the ABI representation, invoke build_abi.sh with the --update flag. It updates the corresponding abi.xml file that's defined by build.config. To print the ABI differences due to the update, invoke the script with --print-report. Be sure to include the report in the commit message when updating the abi.xml file. 6. Working with symbol lists Parameterize build_abi.sh with KMI symbol lists to filter symbols during ABI extraction. These are plain text files that list relevant ABI kernel symbols. For example, a symbol list file with the following content limits ABI analysis to the ELF symbols with the names symbol1 and symbol2: [abi_symbol_list] symbol1 symbol2 Changes to other ELF symbols aren't considered. A symbol list file can be specified in the corresponding build.config configuration file with KMI_SYMBOL_LIST= as a file relative to the kernel source directory ( $KERNEL_DIR). To provide a level of organization, you can specify additional symbol list files by using ADDITIONAL_KMI_SYMBOL_LISTS= in the build.config file. This specifies further symbol list files, relative to $KERNEL_DIR; separate multiple filenames by whitespace. To create an initial symbol list or to update an existing one, you must use the build_abi.sh script with the --update-symbol-list parameter. 
When the script is run with an appropriate configuration, it builds the kernel and extracts the symbols that are exported from vmlinux and GKI modules and that are required by any other module in the tree. Consider vmlinux exporting the following symbols (usually done via the EXPORT_SYMBOL* macros): func1 func2 func3 Also, imagine there were two vendor modules, modA.ko and modB.ko, which require the following symbols (in other words, they list undefined symbol entries in their symbol table): modA.ko: func1 func2 modB.ko: func2 From an ABI stability point of view, func1 and func2 must be kept stable, as they're used by an external module. On the contrary, while func3 is exported, it isn't actively used (in other words, it's not required) by any module. Thus, the symbol list contains func1 and func2 only. To create or update an existing symbol list, build_abi.sh must be run as follows: BUILD_CONFIG=path/to/build.config.device build/build_abi.sh --update-symbol-list In this example, build.config.device must include several configuration options: vmlinuxmust be in the FILESlist. KMI_SYMBOL_LISTmust be set and pointing at the KMI symbol list to update. GKI_MODULES_LISTmust be set and pointing at the list of GKI modules. This path is usually android/gki_aarch64_modules. Working with the lower-level ABI tooling Most users will only need to use build_abi.sh. In some cases, working directly with the lower-level ABI tooling might be necessary. The two commands used by build_abi.sh, dump_abi and diff_abi, are available to extract and compare ABI files. See the following sections for their usages. Creating ABI representations from kernel trees Provided a linux kernel tree with built vmlinux and kernel modules, the tool dump_abi creates an ABI representation using the selected ABI tool. 
A sample invocation looks like this: dump_abi --linux-tree path/to/out --out-file /path/to/abi.xml The file abi.xml contains a textual ABI representation of the combined, observable ABI of vmlinux and the kernel modules in the given directory. This file might be used for manual inspection, further analysis, or as a reference file to enforce ABI stability. Comparing ABI representations ABI representations created by dump_abi can be compared with diff_abi. Use the same abi-tool for both dump_abi and diff_abi. A sample invocation looks like this: diff_abi --baseline abi1.xml --new abi2.xml --report report.out The generated report lists detected ABI changes that affect the KMI. The files specified as baseline and new are ABI representations that were collected with dump_abi. diff_abi propagates the exit code of the underlying tool and therefore returns a non-zero value when the ABIs compared are incompatible. Using KMI symbol lists To filter representations created with dump_abi or to filter symbols compared with diff_abi, use the parameter --kmi-symbol-list, that takes a path to a KMI symbol list file: dump_abi --linux-tree path/to/out --out-file /path/to/abi.xml --kmi-symbol-list /path/to/symbol_list_file Dealing with ABI breakages As an example, the following patch introduces a very obvious ABI breakage: diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5ed8f6292a53..f2ecb34c7645 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -339,6 +339,7 @@ struct core_state { struct kioctx_table; struct mm_struct { struct { + int dummy; struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; u64 vmacache_seqnum; /* per-thread vmacache */ When you run build_abi.sh again with this patch applied, the tooling exits with a non-zero error code and reports an ABI difference similar to this: Leaf changes summary: 1 artifact changed Changed leaf types summary: 1 leaf type changed Removed/Changed/Added functions summary: 0 
Removed, 0 Changed, 0 Added function Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 0 Added variable 'struct mm_struct at mm_types.h:372:1' changed: type size changed from 6848 to 6912 (in bits) there are data member changes: [...] Fixing a broken ABI on Android Gerrit If you didn't intentionally break the kernel ABI, then you need to investigate, using the guidance provided by the ABI monitoring tooling. The most common causes of breakages are added or deleted functions, changed data structures, or changes to the ABI caused by adding config options that lead to any of the aforementioned. Begin by addressing the issues found by the tool. You can reproduce the ABI test locally by running the following command with the same arguments that you would have used for running build/build.sh: This is an example command for the GKI kernels: BUILD_CONFIG=common/build.config.gki.aarch64 build/build_abi.sh Updating the Kernel ABI If you need to update the kernel ABI representation, then you must update the corresponding abi.xml file in the kernel source tree. The most convenient way to do this is by using build/build_abi.sh like so: build/build_abi.sh --update --print-report Use the same arguments that you would have used to run build/build.sh. This updates the correct abi.xml in the source tree and prints the detected differences. As a matter of practice, include the printed (short) report in the commit message (at least partially). Android Kernel Branches with predefined ABI Some kernel branches come with predefined ABI representations for Android as part of their source distribution. These ABI representations are intended to be accurate, and to reflect the result of build_abi.sh as if you would execute it on your own. As the ABI is heavily influenced by various kernel configuration options, these .xml files usually belong to a certain configuration. 
For example, the common-android12-5.10 branch contains an abi_gki_aarch64.xml that corresponds to the build result when using the build.config.gki.aarch64. In particular, build.config.gki.aarch64 also refers to this file through ABI_DEFINITION. Such predefined ABI representations are used as a baseline definition when comparing with diff_abi. For example, to validate a kernel patch regarding any changes to the ABI, create the ABI representation with the patch applied and use diff_abi to compare it to the expected ABI for that particular source tree or configuration. If ABI_DEFINITION is set, running build_abi.sh accordingly will do. Enforcing the KMI using module versioning The GKI kernels use module versioning ( CONFIG_MODVERSIONS) to enforce KMI compliance at runtime. Module versioning will cause CRC mismatch failures at module load-time if the expected KMI of a module doesn't match the vmlinux KMI. For example, here is a typical failure that occurs at module load-time due to a CRC mismatch for the symbol module_layout(): init: Loading module /lib/modules/kernel/.../XXX.ko with args "" XXX: disagrees about version of symbol module_layout init: Failed to insmod '/lib/modules/kernel/.../XXX.ko' with args '' Module versioning is useful for many reasons: - It catches changes in data structure visibility. If modules can change opaque data structures, such as data structures that aren't part of the KMI, modules will break after future changes to the structure. - It adds a run-time check to avoid accidentally loading a module that isn't KMI-compatible with the kernel. (Such as when a current module is loaded at a later date by a new kernel that’s incompatible.) This is preferable to having hard-to-debug subsequent runtime issues or kernel crashes. abidiff has limitations in identifying ABI differences in certain convoluted cases that CONFIG_MODVERSIONS can catch. As an example for (1), consider the fwnode field in struct device.
That field MUST be opaque to modules so that they cannot make changes to fields of device->fwnode or make assumptions about its size. However, if a module includes <linux/fwnode.h> (directly or indirectly), then the fwnode field in the struct device stops being opaque to it. The module can then make changes to device->fwnode->dev or device->fwnode->ops. That's problematic for several reasons: It can break assumptions the core kernel code is making about its internal data structures. If a future kernel update changes the struct fwnode_handle (the data type of fwnode), then the module won't work with the new kernel. Moreover, abidiff won't show any differences because the module is breaking the KMI by directly manipulating internal data structures in ways that can't be captured by only inspecting the binary representation. Enabling module versioning prevents all these issues. Checking for CRC mismatches without booting the device Any kernel build with CONFIG_MODVERSIONS enabled does generate a Module.symvers file as part of the build process. The file has one line for every symbol exported by the vmlinux and the modules. Each line consists of the CRC value, symbol name, symbol namespace, vmlinux or module name that's exporting the symbol, and the export type (for example, EXPORT_SYMBOL vs. EXPORT_SYMBOL_GPL). You can compare the Module.symvers files between the GKI build and your build to check for any CRC differences in the symbols exported by vmlinux. If there is a CRC value difference in any symbol exported by vmlinux AND it's used by one of the modules you load in your device, the module won't load.
If you don't have all the build artifacts, but just have the vmlinux files of the GKI kernel and your kernel, you can compare the CRC values for a specific symbol by running the following command on both the kernels, then comparing the output: nm <path to vmlinux>/vmlinux | grep __crc_<symbol name> For example, to check the CRC value for the module_layout symbol, nm vmlinux | grep __crc_module_layout 0000000008663742 A __crc_module_layout Fixing CRC mismatch If you get a CRC mismatch when loading the module, here is how you fix it: Build the GKI kernel and your device kernel, and add KBUILD_SYMTYPES=1 in front of the command you use to build the kernel. Note, when using build_abi.sh, this is implicitly set already. This will generate a .symtypes file for each .o file. For example: KBUILD_SYMTYPES=1 BUILD_CONFIG=common/build.config.gki.aarch64 build/build.sh Find the .c file in which the symbol with CRC mismatch is exported. For example: cd common && git grep EXPORT_SYMBOL.*module_layout kernel/module.c:EXPORT_SYMBOL(module_layout); That .c file has a corresponding .symtypes file in the GKI, and your device kernel build artifacts. cd out/$BRANCH/common && ls -1 kernel/module.* kernel/module.o kernel/module.o.symversions kernel/module.symtypes a. The format of this file is one (potentially very long) line per symbol. b. [s|u|e|etc]# at the start of the line means the symbol is of data type [struct|union|enum|etc]. For example, t#bool typedef _Bool bool. c. A missing prefix means the entry is an exported function or variable; for example: find_module s#module * find_module ( const char * ). Compare those two files and fix all the differences. Case 1: Differences due to data type visibility If one kernel keeps a symbol or data type opaque to the modules and the other kernel doesn't, then it shows up as a difference between the .symtypes files of the two kernels. The .symtypes file from one of the kernels has UNKNOWN for a symbol and the .symtypes file from the other kernel has an expanded view of the symbol or data type.
For example, assume you add this line to include/linux/device.h in your kernel: #include <linux/fwnode.h> That causes CRC mismatches, with one of them for module_layout(). If you compare the module.symtypes for that symbol, it looks like this: $ diff -u <GKI>/kernel/module.symtypes <your kernel>/kernel/module.symtypes --- <GKI>/kernel/module.symtypes +++ <your kernel>/kernel/module.symtypes @@ -334,12 +334,15 @@ ... -s#fwnode_handle struct fwnode_handle { UNKNOWN } +s#fwnode_reference_args struct fwnode_reference_args { s#fwnode_handle * fwnode ; unsigned int nargs ; t#u64 args [ 8 ] ; } ... If your kernel has it as UNKNOWN and the GKI kernel has the expanded view of the symbol (very unlikely), then merge the latest Android Common Kernel into your kernel so that you are using the latest GKI kernel base. Almost always, the GKI kernel has it as UNKNOWN, but your kernel has the internal details of the symbol because of changes made to your kernel. This is because one of the files in your kernel added a #include that isn't present in the GKI kernel. To identify the #include that causes the difference, follow these steps: - Open the header file that defines the symbol or data type having this difference. For example, edit include/linux/fwnode.hfor the struct fwnode_handle. Add the following code at the top of the header file: #ifdef CRC_CATCH #error "Included from here" #endif Then in the module's .cfile (the one that has a CRC mismatch), add the following as the first line before any of the #includelines. #define CRC_CATCH 1 Now compile your module. You'll get a build-time error that shows the chain of header file #includethat led to this CRC mismatch. 
For example: In file included from .../drivers/clk/XXX.c:16:` In file included from .../include/linux/of_device.h:5: In file included from .../include/linux/cpu.h:17: In file included from .../include/linux/node.h:18: .../include/linux/device.h:16:2: error: "Included from here" #error "Included from here" One of the links in this chain of #includeis due to a change done in your kernel, that's missing in the GKI kernel. Once you identify the change, revert it in your kernel or upload it to ACK and get it merged. Case 2: Differences due to data type changes If the CRC mismatch for a symbol or data type isn't due to a difference in visibility, then it's due to actual changes (additions, removals, or changes) in the data type itself. Typically, abidiff catches this, but if it misses any due to known detection gaps, the MODVERSIONS mechanism can catch them. For example, assume you make the following change in your kernel: diff --git a/include/linux/iommu.h b/include/linux/iommu.h --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -259,7 +259,7 @@ struct iommu_ops { void (*iotlb_sync)(struct iommu_domain *domain); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain, - dma_addr_t iova); + dma_addr_t iova, unsigned long trans_flag); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); That would cause a lot of CRC mismatches (as many symbols are indirectly affected by this type of change) and one of them would be for devm_of_platform_populate(). If you compare the .symtypes files for that symbol, it might look like this: $ diff -u <GKI>/drivers/of/platform.symtypes <your kernel>/drivers/of/platform.symtypes --- <GKI>/drivers/of/platform.symtypes +++ <your kernel>/drivers/of/platform.symtypes @@ -399,7 +399,7 @@ ... -s#iommu_ops struct iommu_ops { ... 
; t#phy s_addr_t ( * iova_to_phys_hard ) ( s#iommu_domain * , t#dma_addr_t ) ; int ( * add_device ) ( s#device * ) ; ... +s#iommu_ops struct iommu_ops { ... ; t#phy s_addr_t ( * iova_to_phys_hard ) ( s#iommu_domain * , t#dma_addr_t , unsigned long ) ; int ( * add_device ) ( s#device * ) ; ... To identify the changed type, follow these steps: - Find the definition of the symbol in the source code (usually in .hfiles). - If there's an obvious symbol difference between your kernel and the GKI kernel, do a git blameto find the commit. Sometimes a symbol is deleted in one tree, and you want to delete it in the other tree. To find the change that deleted the line, run this command on the tree where the line was deleted: a. git log -S "copy paste of deleted line/word" -- <file where it was deleted> b. You'll get a shortened list of commits. The first one is probably the one you are searching for. If it isn't, go through the list until you find the commit. Once you identify the change, either revert it in your kernel or upload it to ACK and get it merged.
https://source.android.com/devices/architecture/kernel/abi-monitor?authuser=1
CC-MAIN-2022-21
refinedweb
3,480
57.27
Comment: Re:High Certainty. (Score 3, Informative) 324 Comment: Re:I still like ubuntu (Score 1) 374 "Does some QA" and "distributing" have been the most innovative contributions from Canonical to the FOSS world and one of the reasons why Linux is reaching the consumer mainstream. Maintaining support channels, distribution, testing, QA, and infrastructure are very costly and time-consuming things, but they're the sort of stuff that separates enthusiast products from turnkey and consumer products. This distinction is not at all trivial, and I feel it is underestimated by most FOSS enthusiasts since, well, they've never been on the other side of the supply chain. Basically, commercialization, distribution, integration and support are the most costly parts in most products, more costly than actual development. But it's boring, tedious, and standardized, so not deemed to be of interest. Comment: Re:We had that setup in the 1960s and the 1970s. (Score 2) 142 Comment: Re:Quelle surprise! (Score 1, Insightful) 175 Comment: Re:So what? (Score 1) 152 Comment: Re:The greater problem (Score 1) 447 is to make these models, and considering that the impact is still absolutely massive, it is intellectually dishonest to disregard these results. In construction, materials are made to withstand their recommended pressures and load values 30% above what the expected usage will be. It doesn't matter for all practical purposes whether it will stand 30% more or 200% more; what matters is that it goes above 20%. Most constructions could stand double the load they have. Well, in AGW, even 10% of the estimated damage is so great that it merits taking care of it. Engineers frequently make estimation errors in the order of magnitude, using incredibly precise measurement tools, and nobody complains. Let's not be fools about this either. Comment: Re:Before people scream consistency... (Score 4, Insightful) 447 Comment: Re:People who do not learn from history....
(Score 1) 253 clustering techniques because there are no hard dependencies between nodes. Sorry, but J2EE has absolutely nothing a modern Perl/Python/Ruby framework doesn't have today, except mechanism for managing the inherent complexity of a crappy language with a shitty object model that needs overly complex architecture to make up for its fundamental lack of programming constructs. You're not going to find equivalents to @Stateless beans, Message-driven beans or any of that bullshit because decent frameworks make managing state something absolutely trivial, not an exercise in frustration that requires configuring 5 XML files and implementing two interfaces just to save an object to a database. Nobody cares about RMI or "messages" because you can make a restful web service with 2 regular expressions and a 15-line Ruby file. Nobody cares about monstrous ORMs that can barely handle programmatic queries when SQLAlchemy does that and with a 20-minute tutorial you're already up and running. Oh, and just for the record, most people dn't even need to check for memory leaks because they use a language VM that consumes at most 20% of your runtime's resources and does the hard work with C plugins. Let the scourge of J2EE and "entrerprise" frameworks burn in the underworld. Comment: Re:getters setter :) (Score 2, Insightful) 253 Comment: Re:Except when markets fail (Score 1) 445 Comment: Re:You damn well should (Score 4, Insightful) 605 Comment: Re:PhD required? (Score 1) 1093 Comment: Re:A huge pain (Score 1) 531 You're confusing a variety of unrelated things here. Javascript works fine in every browser that implements standards accordingly (that is, every browser with the exception of IE 6, 7 and 8). The language is not only consistent across browser, it's actually implementing some really interesting features such as list comprehension, generators, and block scoping. 
And I don't know where you get the idea that debugging Javascript is any more difficult than any other scripting language. You can't claim to be a professional JS dev and not have heard of some tools. Oh, and as a scripting language, it is one of the fastest dynamically typed languages available, in the same league as SmallTalk and Lua. The fact that Palm developers obviously used the wrong tool for the wrong job does not in any way detract from the qualities of the language. Methinks there's a lot of people that talk crap about Javascript but have never bothered to get the proper documentation and tools. Newsflash for everyone: anyone who does professional Python and Ruby development uses debuggers and text editors specifically for that job. Just because JS runs on the browser doesn't mean it doesn't need the same level of attention. Comment: Re:Clueless (Score 1) 531 What are you talking about? Javascript's only similarity with Java and C comes in the form of its syntax, made to appease Java a C++ programmers. NOTHING else in the language is even remotely similar, and its developers have made that clear from day one. Does Java have support for closures and first-class functions? Does Javascript have support for namespaces? Does Java support prototypical object orientation Can Java use hashtables as objects?
http://slashdot.org/~Daishiman
CC-MAIN-2014-35
refinedweb
862
51.58
Hi java peoples, I just recently started programming java, and I having a problem with my program. I'm trying to determine the number of even, odd, and zero digits in a number inputted by the user. It counts even and odd digits fine, however, it determines every zero digit to be an even digit. For example, when I enter a number like 140, the program says that there are 2 evens, 1 odd, and 0 zeros, where it should be 1 even, 1 odd, and one zero. Any help would be appreciated. Code Java:
/**
 * Counts the even, odd, and zero digits of a number typed by the user.
 *
 * Bug fix: String.charAt() returns the CHARACTER CODE of the digit
 * ('0' is 48, '1' is 49, ...), not its numeric value. The original code
 * compared that code against 0, which never matched, so every '0' digit
 * fell through to the even branch. Subtracting '0' converts the character
 * to its digit value (0-9) before classifying it.
 */
public class Project_6
{
    public static void main (String[] args)
    {
        System.out.print ("Enter a number: ");
        String value = Keyboard.readString();

        int evens = 0;
        int odds  = 0;
        int zeros = 0;

        // Examine each character of the input string.
        for (int count = 0; count < value.length(); count++)
        {
            // Convert the character ('0'..'9') to its numeric value (0..9).
            int digit = value.charAt(count) - '0';

            // Check for zero FIRST so it is not classified as even.
            if (digit == 0)
                zeros++;
            else if (digit % 2 != 0)
                odds++;
            else
                evens++;
        }

        System.out.println ("\nEven digits: " + evens +
                            "\nOdd digits: " + odds +
                            "\nZero digits: " + zeros);
    }
}
http://www.javaprogrammingforums.com/%20whats-wrong-my-code/32256-problem-my-code-fairly-new-java-printingthethread.html
CC-MAIN-2015-35
refinedweb
161
67.04
. Let’s see an example of a simple script featuring all of the abstractions Ninja provides: # build.ninja cc = clang cflags = -Weverything rule compile command = $cc $cflags -c $in -o $out rule link command = $cc $in -o $out build hello.o: compile hello.c build hello: link hello.o default hello Putting aside the fact that there’s no point in writing something like this for a single file, let’s see what’s going on there. First, we define 2 variables and later refer to them using the $ sign. Second, there are rules. Rules are essentially functions that call an external command to perform an action. Finally, build statements are used to define dependencies between input and output files. If you were to write the same with Make using its conventions, you’d probably end up with something like this. To see a more realistic example with proper dependency tracking, I converted libgit2’s Makefile.embed to Ninja. The Makefile compiles libgit2 and creates a static library out of it. You can see the result here. You’ve probably noticed a few things. First, Ninja scripts are explicit. You cannot use any fancy substitution/wildcard functions (or any other control structures for that matter). As a result, the script is not only much longer but it also cannot handle any conditions making it unsuitable to any multi platform/compiler development. And this is by design. I’ve mentioned that Ninja is meant to be used with a higher level tool (generator). One of the reasons for doing that is to overcome the said issues. In practice this means that you do not care about the absence of conditions or any other capabilities because the generator handles it for you simply by generating a different set of build scripts. Ninja comes with a simple Python based generator. The generator is straightforward, you call the methods and it outputs the corresponding Ninja syntax to a file. Since it’s just Python, you can make all of the platform and compiler decisions here. 
In fact, this is the way Ninja itself is built: from ninja_syntax import Writer with open("build.ninja", "w") as buildfile: n = Writer(buildfile) if platform.is_msvc(): n.rule("link", command="$cxx $in $libs /nologo /link $ldflags /out:$out", description="LINK $out") else: n.rule("link", command="$cxx $ldflags -o $out $in $libs", description="LINK $out") The fun part is that Ninja is already supported by some of the most popular meta build systems out there - CMake and Gyp. If you have a CMake based project and assuming you have Ninja available in your PATH, all you need to do is to choose Ninja as the generator: $ cd libgit2 && mkdir build && cd build $ cmake -GNinja .. $ ninja With this change, CMake generates a bunch of Ninja build files and Ninja builds the project. Notice that there’s no need to specify the number of parallel jobs ( -j [jobs]) because Ninja automatically chooses the value based on the number of cores available. The compilation speed is not very different although it might be a bit faster due to Ninja consuming very little CPU while driving the build process. What is however very significant are the time savings when working with the source code and invoking the build process again. A non-scientific benchmark performed on my laptop shows that Ninja is indeed much faster: These savings will become even more significant as your project grows. I encourage you to try Ninja and compare the build cycle times with Make. You will very likely see a similar difference. If you want to learn more about Ninja, here’s a few links: If you enjoyed the article, you should follow @JiriPospisil on Twitter and subscribe via feed.
http://jpospisil.com/2014/03/16/replacing-make-with-ninja.html
CC-MAIN-2017-13
refinedweb
624
64.1
Robert Love, well known figure for his kernel hacking, preemptive patch and his recent book (review), joined Ximian recently in an effort to improve the Linux desktop experience via kernel development. Today we feature a mini-Q&A with Robert about this new project.1. ‘meetings’‘s. “That said, I think the reason BeOS is so often touted as being very responsive is that it was highly threaded and traded throughput for latency at many levels.” Latency vs. throughput is always a tradeoff (if you can win on both fronts, the original design was just bad). I wouldn’t list threading as a factor. In quite a variety of cases the OS felt very responsive even though the number of threads involved was not any higher than it would be e.g. between an X server and an X app. One of the real reasons why the OS was responsive was that it had very few “giant locks”. Another reason is that the scheduler was aggressive. The heartbeat was set to 3ms but seldom triggered. Timers were extremely accurate, and the latency from IRQ to user-space was very low. Yet another reason was that the client-server communication was highly asynchronous. And one last reason (the one that Eugenia invoked) was that several core user-space processes (and especially the app_server) were very careful to use the kernel in ways which were known to be effective (surprisingly, those ways mostly involved trying NOT to reschedule too often, so that a useless latency improvement wouldn’t result in a visible loss of throughput). JBQ, in what ways was app_server different, and similar, to X? today has been a great day because you have been posting on a lot of stories and it is certainly nice to have a truly informed person making comments on such things. It should be interesting to see how OpenBeOS behaves when it comes to perceived responsiveness. I still cannot use Linux as a desktop OS because it lags badly (haven't tried 2.6 yet).
Since OBOS are trying to revive the BeOS look’n’feel, it will be interestnig to see how it behaves performance wise. Incidently, the OBOS app_server seems to be near alpha stage as we speak. also, it would seem to me that Linux is moving in the direction of design that you outlined BeOS to have had. try fedora, it is very responsive. >try fedora, it is very responsive. After doing some stuff on Gnome, try clicking the hat menu. It gets unresponsive for about 3-4 seconds. This (bug? I am not the only one seeing it) only started happen with Fedora, and it doesn’t happen on other distros. Probably some of the patches Red Hat added creates this specific unresponsiveness which is pretty irritating. We should be doing cool stuff, like asking the user if they want to copy their photos to a new album. There we go. Linux is pretty responsive for me. More than any OS I’ve used(XP, OS X). But I’m sure Gentoo had a part to play in that. A few apps take for ever to load and are just outright sluggish, though. (OOo anyone?) Others take a while to load, but once loaded they are responsive. Others just load fast and respond equally well. Then 2.6 changed that all. 🙂 no worse than Windows XP. I get lags like that with my menues in XP so I do not really care as much. no worse than Windows XP. I get lags like that with my menues in XP so I do not really care as much. I thought I was the only one getting those lag under Windows XP. My dual boot with suse 9 is faster then windows XP. Perhaps because suse is able to use much more of my ram then xp, even dipping into swap at times. 2000+ WDJB 1G DDR “in what ways was app_server different, and similar, to X?” Well, those are two totally different beasts, actually. X is a standardized and fairly rigid generalized protocol with many different implementations. The app_server on the other hand used a custom and ever-changing specialized protocol. 
I can’t talk in detail about any X implementation, the only one I ever had a remote look at was a basic port of XFree86 to BeOS (probably around 1999, and it was not even a current version), and at the time it looked quite primitive and looked more like a reference implementation than like optimized production code (I don’t mean to offend anyone, but sometimes you need to do a few ugly things in order to squeeze performance out of your code). In terms of protocols, the app_server relied heavily on two mechanisms, shared heaps and kernel-level message queues. It couldn’t have worked without those. Once, for the sake of the experiment, we tried to serialize parts of the app_server protocol through a byte pipe (similar to what X does) and the performance was so poor that we gave up before even making bitmaps work. Oh, and there was some behind-the-scenes black magic with the way scrollbars were handled (anyone who tried to inherit from BScrollBar would have noticed). At some point some other types of views were handled entirely inside the server, including the Terminal (but very few people used versions of BeOS where that was the case). Don’t make me puke! The perfect interface was invented *long* ago: The File System Fix Linux to support the most basic features a sane OS needs: 1) Per-process namespaces(already in 2.6, thanks Viro!) 2) (l)user-space filesystems(otherwise (1) is quite useless) 3) 9p support for transparent filesystem access As for threading, if someone got a clue and did a useful threading system like Plan9 has had for ages; in most other OSes threads are just process with shared mem and some extra useless complexity thrown in, talk about idiotic ideas, for that you should add extra flags to fork() to indicate if you want shared mem, shared namespace, etc. you don’t need the added complexity of threads for that. 
\k I can confirm that we have very bad responsiveness on XP here in the 3 computers we have in my company: – start menu is very slow, it lags when opening some submenus – sometimes, the explorer does not respond for several seconds (does not display the disk content, even when only 1-5 files in the directory), most often via the network, but also often on the local hard drives. (systems are two P4 3GHz and 1 centrino 1.5GHz). I’m sure I’ll get flamed for saying this, but I am really tired of the wide variation in responsiveness of linux depending on distro, hardware etc. I mean, with XP, I pretty much know what to expect with a given set up – memory amount and kind, hard drive speed, video card etc. I can look at a specs of a system and pretty much predict how XP will behave on that system (and that includes various tweaks). Now, that simply is not the case with linux – you can have exactly the same OS version (same distro, same kernel, same everything) and yet, there is no way to say how this will behave on any given system – other than *maybe* extremely general stuff like “more ram, faster” (and even that is not dead certain sometimes). I really think that is due to something inherent in a combination of non-optimized drivers, various combinations of DE stuck on top of the kernel each responding differently and apps which in linux rarely seem to be written with responsiveness in mind (mind you, I’m not talking about different distros that mix and match and patch and hack their own brews). It’s a mess. It’s a bigger mess than XP or OSX. I don’t know about others, but to me personally, responsiveness is very important, and I am amazed at how many linux users are so willing to put up with absolutely dog slow performance – stuff that raises my blood pressure instantly. I don’t know if there is even a cure for linux possible… sure 2.6 might be faster, but boy that’s only ONE of a dozen factors… it’s like pushing on a string. 
The worst thing is that you can spend weeks tweaking your system, and it still will suddenly and inexplicably go into “slow mode”. Say what you will about the XP cow – at least it is consistently mediocre. Rant over, flame away. A kqueue implementation for Linux seems like a nice way for delivering arbitary kernel events to userspace. Better than having X diffrent notification schemes for diffrent devices/subsystems, and much better than polling ,say, files in /proc/ .. Any thoughts ? Oh, read and I thought I was the only one getting those lag under Windows XP. no youre not. i had stuff like explorer crashes (filemanager and desktop crashes and reloads and stuff – apps who dont want to start because theyre crap – norton security bla for example A LOT) luckily not on my computer, but on some others. windows people just reinstall everything then, if they care about performance, update to the latest internet worms etc. it really depends also on what you do to “optimize” your system. some jerks just break more, and some supposed low latency patches make the desktop even more sluggisher. i stayed since 2.4.18 with debians default binary kernel, and i have no ui-lag or similar. and yea i can wait for some app to load. but never longer than some seconds. (you dont expect gimp to spring up in one second – nor do you expect that from photoshop – rhythmbox also loads as fast as itunes) one thing bugging me at the moment are some bugs in gnome-theme-manager, but i think when i apt-get the new ones it might well be fixed. The perfect interface was invented *long* ago: The File System The thing is, as I understand from Robert Love’s answers, right now in order for the desktop to detect an usb hard disk being plugged you need to make polling. Also think that to get hardware stats on linux you do things like catting /proc, and so on. Filesystem abstraction is nice, but I guess a message passing system for hardware notifications is a necessary step for the desktop. 
Unless you want to poll the filesystem to detect changes (not very optimal, IMO). I am the one who posted the comment with mentions of 3 computers under XP. I am sorry you feel wounded by the fact that several Windows user compare their experience about the lack of responsiveness of Windows XP. As for Linux, using Mandrake since 7.0 (kernel 2.2) and having used Debian Slink (2.0) before (kernel 2.0), I have always have acceptable responsiveness with it. Kernel 2.4 is a wonder in my opinion. I have a 600 MHz PC at home, and have never experienced latency problems (mouse jumping or xmms skipping) with it, even when I compile scribus or gimp (this can be a 1 hour task on my computer) and have other tasks on. This maybe related to my light DE configuration (Rox-Filer + Enlightenment), but I have consistently a responsive desktop under heavy load. At work, I can hang (no answer for 10 seconds) XP when Matlab or other soft are computing heavily. Never on Linux. Sorry if you do not appreciate my experience, but I am no liar and you will have to live with this fact. PS: by the way, the 3 PCs I mentionned previously all are installed by Professional Windows administrators, not by me. That will turn Linux in some kind of windows on Linux kernel, and might even compromise kernel operations. I like small and simple UNIX philosophy, I do not mess with KDE and Gnome. I hope it will never happen. Otherwise I wold have to migrate to FreeBSD. DG > I hope it will never happen. Otherwise I wold have to migrate to FreeBSD. You should’ve years ago then. Oh yeah ! .” it’s slow. but Windows server 2003 is very fast. — If this is the direction Novell/Ximian is going, to improve linux desktop in general, not focusing only in one part of the community (the GNOME part), but providing integration and general improvements, they truly deserve my sincere thank you. RH and SUSE were already going in this direction. 
There is no reason a GTK/GNOME app could not feel at home in a KDE desktop and vice versa. Sharing common infrastructure only makes things better. If you want to find out when a file has changed without polling, there should be a very fast and low-level mechanism to get notified for file changes, like there was in BeOS. In linux there is the dnotify system call, but it is only useful for single directories, and not for whole directory trees. This is an area that definitely needs improvement. In windows, there is an API that lets you watch a complete directory tree for changes of a certain kind. I would love to see something like this. Things like live queries would be very easy to implement given efficient file systems like reiser4 and file change notification. “not talking about different distros that mix and match and patch and hack their own brews). It’s a mess. It’s a bigger mess than XP or OSX. ” Nope. Strip out everything but an equivalent amount of barebones “comes with the OS” software, like Notepad equivalents (Kedit, kppp, etc.) and you would find that they are just about all the same speed. Load a bunch of stuff on windows and you get varying degrees of load times and responsiveness as well. Try bearshare if you want slow. I’m sure I’ll get flamed for saying this, but I am really tired of the wide variation in responsiveness of linux depending on distro, hardware etc. I strongly suspect that a lot of the bad responsiveness people report is due to hard drives not being in DMA mode. Way too common, unfortunately. They do have something like that, it’s called imon/FAM. Both GNOME and KDE use it. imon only works for the node you told it to watch, it doesn’t recurse into subdirectories. So if you are monitoring a directory and you add files to a subdirectory of that directory you will get no event from FAM or imon. You have to manually set up your query to do that, no big deal really but it isn’t equivalent to BeOS or Windows.
As far as I remember BeOS can’t node-monitor an entire hierarchy (the name “node-monitoring implies that you can only monitor one node), and I’m sure than queries can’t search on the path. I know about FAM. But unfortunately it is just a library that does its own polling. This works just fine when you want to watch for the properties of a folder to change to display the changes in a file manager such as konqueror (and that is in fact what it is designed for). But if you wanted to do something like using tiny files on reiserfs as extendable attributes and doing live queries on them, this would be way too slow. There really needs to be a fast low-level mechanism for watching for file changes in whole directory trees. Unfortunately I have no idea how to implement something like this. I asked hans reiser about such a feature, but he answered that such a feature would best be implemented in the linux VFS layer. I have no detailed knowledge on how the VFS layer works and wether it is capable of handling extremely small files, so I can not implement it myself. But I still think such a feature would be great. You could represent arbitrary attributes as tiny files in a directory, so you would get all the advantages of live queries without breaking the simple unix principle of “everything is a file”…. I do not know how live queries were implemented in BeOS. Obviously you are way more qualified to answer this 🙂 But having a way to monitor an entire filesystem hierarchy for changes would be immensely useful. It would not be simple to implement (all sorts of nasty race conditions when querying a constantly changing file system), but it would be a very simple interface. It can not be too hard to do this, since NTFS supports it. You can e.g. watch for changes in all files named “test.txt” in your entire C: tree, and it does not even slow the system down noticeably. Yep, BeOS can not monitor entire hierarchies. 
OTOH, you can monitor a directory and get notified about entries added/removed to it and, with that info, start monitoring the new entries. Also, yes, you can’t do path-based queries. They’re always system-wide. You can, of course, programmatically filter the query results to only display the results that match a specific path. -Bruno Node monitoring is not really the same thing as queries. You *CAN* ask the system for all files that match “*.txt” and then get notifications about files that match it being created or removed anywhere in the hierarchy. That works pretty well, in fact, when you consider BFS. OpenBFS even has improvements on this (like searches on non-indexed attributes). -Bruno Getting changes for all files matching a certain pattern on the whole directory tree is not as good as what NT provides, but it would work well enough. Is this possible under linux, or are you talking about BeOS? I am talking about BeOS. -Bruno If your processor is overeating you’ve got more problems than just responsiveness in your GUI… or maybe it’s grown too lethargic from all that overeating….? 🙂 time to put the P4 on a crash diet. I believe FAM only resorts to polling if there is no kernel level mechanism present. However, if imon or dnotify (the linux equivalent) are patched into the kernel’s VFS, you get very efficient behavior, without polling. dnotify is standard in the kernel since somewhere in 2.4.x. I wish more people knew about this. It’s such a good way of doing things. Yes, you are right. FAM uses dnotify if available. But since dnotify can only query a single directory it is basically useless for anything more complicated than a file manager. BeOS live queries or even the NTFS mechanism (whatever it is called) are way ahead of this. <quot>one thing bugging me at the moment are some bugs in gnome-theme-manager</quot> done with the 2.5.0 In previous versions of Mandrake when I moved large numbers of files, the system would bog down, and stall.
In the latest MDK 9.2, I did the same tasks without any problems. This was a VERY noticeable improvement… They probably backported stuff from 2.6 to 2.4 for MDK 9.2… I can’t compare to Win because I’ve been windows free for 46 months! MarkP “BeOS live queries or even the NTFS mechanism (whatever it is called) are way ahead of this.”. BeFS was horribly slow (try tarring/untarring and rm -rf ing the kernel source tree on a BeOS). Smart admins know that if they want to do lots of queries they can run a DB server. Come to think of it … never did see a lot of Oracle deployments on BeFS so what good is it for real “queries”? >. Hello idiot. Sometimes, you want to know when a given file in a directory changes (= log file updated, new file arrived by file transfer, etc) and you don’t want to “ls” that directory every second to find out. It would be a lot easier if the kernel told you “file blahblah located in /blah/blah changed size”. Now, go back to compiling your shit and leave desktop for Windows XP. Getting a tad touchy there aren’t we? Come on people, this is a discussion place. Not a battlefield. An OS is an OS and we *all* know that Windows XP isn’t exactly a shining example of what’s best. I know that creating and changing files under Linux (especially ReiserFS) is way faster than on NT or BeOS. But I was talking about *watching for file changes* in a directory tree. And that is way, way, waaaay slower under linux than under BeOS or even NT. I would like to combine the advantages of Reiser4 (excellent performance especially with small files, ***transactions***) with the nice features of BeOS (very fast change notification) to create something really advanced on par or better than the Longhorn WinFS. Reading your post I get the impression that you do not even understand the problem… Well, I like the interview. And speaking about Linux being unresponsive I must disagree. My KDE under 2.4.22 was very reponsive and with full release of 2.6.0 it rocks. 
I would like to say: ,,Go and make it better than ever” to Robert Love. And thanks for all the good work. From the interview: “I am very stoked — world domination has never been so attainable.” This is what keeps me away from deploying Linux on my home systems. We would never want to replace the Windows monopoly with a Linux one. We want a healthy competition (not quite there yet) that spurs development. That such a prominent Linux developer says this is to me very disturbing Linux can and should be so much more. I’ll stick with *BSD until something radically changes in the Linux community *flamesuit on* As a business/personal user, I’m disturbed by Robert’s answer to Eugenia’s question #7. “Robert Love: There is definitely no plans to make binary drivers happier. Linus has made it clear: he and many other kernel hackers just do not care about them. I don’t mind that, either!” We, the users, just want to get our work done. Sometimes, that requires installation of a device that requires a driver. We don’t want to recompile anything, just have the device work the way it’s supposed to work. Binary drivers help to make that possible for the non-techie. Not caring about the user’s experience is a sure way to inhibit “world domination.” MS now dominates because they understand and cater to the “unsophisticated users” that comprise well over 95% of the computer users of the world. By Rich Lewine (IP: —.ba-dsg.net) – Posted on 2003-12-20 17:57:31 As a business/personal user, I’m disturbed by Robert’s answer to Eugenia’s question #7. You understand that binary drivers (modules) are just fine? They’re talking about closed-source drivers/modules. nvidia’s stuff is even worse since it’s non-standard junk all over. No, I didn’t realize that closed source was the object of his comment. Perhaps if I were a programmer it would have been obvious. That just makes my point. Assumptions seem to be made about users capabilities, from both a knowledge and skill perspective. 
I do, however, now better understand their position (Linus, Robert, et al); an open source OS should have open source drivers/modules. So the challenge appears to be getting nvidia, etc., to agree and comply. Apparently, not an easy task, even getting them to open their source for others to do the work. I’m still troubled. OSS leaders seem to be as intransigent as the proprietary bunch. Unless there is a meeting of the minds, a commonality developed, This standoff could go on interminably. The users are the losers. . .and that’s who this stuff is for. Linux is no longer a hobby OS. The design of Unix is to have few and simple kernel calls which can be used in powerful ways. dnotify does just one thing and does it well. If you want to monitor an entire tree, write a recursive descent function which sets up dnotify for all nodes in the tree. Then, if notified of new directory creations, add dnotify for that directory. No need to make a simple system call complicated. Yes, it adds a little more code on your part, but only if you need it, not for every programmer who doesn’t need it. And, efficiency wise, it will be just the same as if you had a single system call to notify on changes to an entire tree. I agree, it is disturbing to see the standoff between OSS leaders and hardware companies developing proprietary drivers. Fortunately, there are a few distribution companies that are making that effort for users, but unfortunately, each one solves the problem incompletely, and in their own way. Hopefully, someone will be big enough to see beyond their own turf, and solve it for all of us. the problem of dnotify is that it *needs* a file descriptor per file monitored, as file descriptors are limited this makes monitoring a whole tree o subtree impossible. Linux SE can do this without the need of keeping a file descriptor per file, maybe in 2.7 we will hace a solution based on the hooks provided by Linux SE. 
I really don’t get why people consider this a “standoff” between OSS leaders and hardware companies. I think it was Wired that interviewed Linus recently where he indicated that though it is a problem today, it’s not likely to last. It used to be like pulling teeth to get specs for various SCSI cards from hardware vendors, much less actual open source drivers. Now since Linux is so big on the server side of things, these vendors fall over each other to make sure their devices work with Linux and have no problem releasing code or at least specs for them so that Linux can have fully GPLed drivers for them out of the box. I (and Linus, as it seems from his interview) believe this will happen with Nvidia and ATI and any other vendors who product binary only drivers for devices geared toward the desktop. Eventually. To make it easier for these vendors to NOT release their code will ensure that they will never release their code. Given how often new graphics cards, in particular, are released, the argument the vendors use about preventing people from using the code to reverse engineer the card and produce a clone is bogus. Has anyone successfully reverse engineered quickly changing hardware using source code for drivers in an economical way and survived as a business? That’s an honest question, but I’m guessing the answer is no.
https://www.osnews.com/story/5459/mini-interview-with-ximians-robert-love/
CC-MAIN-2021-31
refinedweb
4,496
71.65
An abstract widget that corresponds to an HTML form element. More... #include <Wt/WFormWidget> An abstract widget that corresponds to an HTML form element. A WFormWidget may receive focus (see canReceiveFocus()), can be disabled, and can have a label that acts as proxy for getting focus. It provides signals which reflect changes to its value, or changes to its focus. Form widgets also have built-in support for validation, using setValidator(WValidator *). If the validator provide client-side validation, then an invalid validation state is reflected using the style class "Wt-invalid". All validators provided by Wt implement client-side validation. On the server-side, use validate() method to validate the content using a validator previously set. Destructor. If a label was associated with the widget, its buddy is reset to 0. Returns whether the widget can receive focus. Reimplemented from Wt::WWebWidget. Signal emitted when the value was changed. For a keyboard input, the signal is only emitted when the focus is lost Returns the placeholder text (deprecated).. Reimplemented in Wt::WPushButton. Returns whether the form element is read-only. Returns the label associated with this widget. Returns the label (if there is one) that acts as a proxy for this widget. Returns the placeholder::WInteractWidget. Reimplemented in Wt::WPushButton, Wt::WTextEdit, Wt::WDateEdit, and Wt::WTimeEdit.::WPushButton, Wt::WComboBox, Wt::WAbstractToggleButton, Wt::WDoubleSpinBox, and Wt::WAbstractSpinBox.. Reimplemented in Wt::WLineEdit, Wt::WSlider, Wt::WDateEdit, Wt::WDoubleSpinBox, Wt::WAbstractSpinBox, and Wt::WTimeEdit. Sets the placeholder text (deprecated). Sets whether the widget is enabled. A widget that is disabled cannot receive focus or user interaction. This is the opposite of setDisabled(). Sets the hidden state of this widget. If the widget has a label, it is hidden and shown together with this widget. Reimplemented from Wt::WWebWidget. 
Reimplemented in Wt::WDateEdit, and Wt::WTimeEdit. Sets the placeholder text. This sets the text that is shown when the field is empty. Reimplemented in Wt::WTextEdit. Sets the element read-only. A read-only form element cannot be edited, but the contents can still be selected. By default, a form element area is not read-only. Reimplemented in Wt::WTextEdit. Sets a tooltip. The tooltip is displayed when the cursor hovers over the widget. When textFormat is XHTMLText, the tooltip may contain any valid XHTML snippet. The tooltip will then be rendered using JavaScript. Note: This will set deferred tooltip to false. Reimplemented from Wt::WWebWidget. Sets a validator for this field. The validator is used to validate the current input. If the validator has no parent yet, then ownership is transferred to the form field, and thus the validator will be deleted together with the form field. The default value is 0. Sets the value text. This sets the current value from a string value. Implemented in Wt::WPushButton, Wt::WSlider, Wt::WLineEdit, Wt::WComboBox, Wt::WTextArea, and Wt::WAbstractToggleButton. Returns the tab index. Reimplemented from Wt::WWebWidget. Validates the field. Reimplemented in Wt::WLineEdit, and Wt::WAbstractSpinBox. Signal emitted when the widget is being validated. This signal may be useful to react to a changed validation state. Returns the current value. This returns the current value as a string. Implemented in Wt::WPushButton, Wt::WSlider, Wt::WLineEdit, Wt::WComboBox, Wt::WTextArea, and Wt::WAbstractToggleButton.
https://webtoolkit.eu/wt/doc/reference/html/classWt_1_1WFormWidget.html
CC-MAIN-2016-50
refinedweb
544
54.39
Opened 4 years ago Closed 2 years ago Last modified 15 months ago #12119 closed feature request (wontfix) Can't create injective type family equation with TypeError as the RHS Description For the longest time, I've wanted to make a type family like this injective: type family Foo (a :: *) :: * where Foo Bar = Int Foo Baz = Char But the problem is that Foo is defined on all types of kind *, so the above definition is inherently non-injective. With the introduction of TypeErrors, however, I thought I could rule out all other cases: import GHC.TypeLits type family Foo (a :: *) = (r :: *) | r -> a where Foo Bar = Int Foo Baz = Char Foo _ = TypeError ('Text "boom") But this doesn't work, sadly: Injective.hs:18:3: error: • Type family equations violate injectivity annotation: Foo Bar = Int -- Defined at Injective.hs:18:3 Foo _ = (TypeError ...) -- Defined at Injective.hs:20:3 • In the equations for closed type family ‘Foo’ In the type family declaration for ‘Foo’ Injective.hs:20:3: error: • Type family equation violates injectivity annotation. Type variable ‘_’ cannot be inferred from the right-hand side. In the type family equation: Foo _ = (TypeError ...) -- Defined at Injective.hs:20:3 • In the equations for closed type family ‘Foo’ In the type family declaration for ‘Foo’ Injective.hs:20:3: error: • Type family equation violates injectivity annotation. RHS of injective type family equation cannot be a type family: Foo _ = (TypeError ...) -- Defined at Injective.hs:20:3 • In the equations for closed type family ‘Foo’ In the type family declaration for ‘Foo’ From GHC's perspective, TypeError is just another type family, so it thinks it violates injectivity. But should this be the case? After all, having the RHS of a type family equation being TypeError is, in my perspective, tantamount to making that type family undefined for those inputs. 
It seems like if we successfully conclude that Foo a ~ Foo b (and GHC hasn't blown up yet due to type erroring), we should be able to conclude that a ~ b. Could this be accomplished by simply adding a special case for TypeError in the injectivity checker? Change History (10) comment:1 Changed 4 years ago by comment:2 Changed 3 years ago by comment:3 Changed 3 years ago by Note: this can be implemented in a dead-simple way: compiler/typecheck/FamInst.hs diff --git a/compiler/typecheck/FamInst.hs b/compiler/typecheck/FamInst.hs index cabfb33..b56b68e 100644 Of course, there's still Simon's point about formalizing this idea, which I have yet to do. comment:4 Changed 3 years ago by Of course, there's still Simon's point about formalizing this idea, which I have yet to do. Yes: formalising it at least into a GHC propoosal would be good: it's a user-facing change. comment:5 Changed 2 years ago by It just occurred to me that the entire premise of this ticket is wrong. I claimed: Replying to RyanGlScott: For the longest time, I've wanted to make a type family like this injective:type family Foo (a :: *) :: * where Foo Bar = Int Foo Baz = Char But the problem is that Foois defined on all types of kind *, so the above definition is inherently non-injective. But the "inherently non-injective" part is totally bogus! In fact, as the code below demonstrates, Foo can be made injective quite easily: {-# LANGUAGE TypeFamilyDependencies #-} data Bar data Baz type family Foo (a :: *) = (r :: *) | r -> a where Foo Bar = Int Foo Baz = Char I don't know why I believed that crazy misconception about injectivity vis-à-vis exhaustivity, but in any case, the entire reason why I was pursuing this feature in the first place has vanished. In light of this, I don't think it's worth adding this much extra complexity to the type family injectivity checker. 
comment:6 follow-up: 8 Changed 15 months ago by What about the following: data Dim = D2 | D3 | D4 type family ToDim (n :: Nat) = d | d -> n where ToDim 2 = D2 ToDim 3 = D3 ToDim 4 = D4 ToDim n = TypeError ( Text "Error: dimension must be 2, 3 or 4 (given: " :<>: ShowType n :<>: Text ")" ) This seems useful to me, as it allows one to switch easily between two different representations of dimension (which have different uses: with Nats I can do arithmetic, but the explicit enum is more convenient with singletons for instance). I feel like the injectivity annotation should be allowed in this case (but I am not aware of any of the theory which backs injective type families). comment:7 Changed 15 months ago by comment:8 Changed 15 months ago by What about the following: ... This seems useful to me, ... Sure, it's the same structure as the O.P. I feel like the injectivity annotation should be allowed in this case ... It is. Ryan's comment:5 says what's not allowed is the TypeError catch-all equation -- that is, if you want ToDim to be injective. So if you call ToDim 5, inference will get stuck, and you'll get an error message somewhere/somehow else. comment:9 Changed 15 months ago by I understand that the type family is injective if I remove the type error. However I would prefer a custom error message, as opposed to the user getting some confusing error about a stuck type family application. In my opinion, one of the advantages of injectivity annotation is that internal use of the type family doesn't end up leaking into the front-end. comment:10 Changed 15 months ago by The injectivity annotation is a promise that all your equations are injective. Then consider the return type of ToDim 5 compared to ToDim 6 (if you put a TypeError equation): they're the same type. Then that equation is not injective. Specifically, ShowType n is not injective: it's an existentially-quantified data constructor, promoted to a datakind. 
If you omit the TypeError equation, there's an implicit ToDim n = ToDim n added at the end. That is injective. Possibly you could put some other error-handling logic, that preserves plain n on the rhs, then that would be injective. That looks plausible to me. What are the consequences of declaring Foo to be injective? Answer, only that if we have a constraint [W] g1 : Foo t1 ~ Foo t2, where [W] means "wanted", a constraint we are trying to solve, then we try to prove [W] g2 : t1 ~ t2. Assuming we succeed, binding g2 = <some coercion>, then we can prove the first constraint by binding g1 = Foo g2. If Foo is not injective, this proof strategy is not unsound; but it may be incomplete. Perhaps there are un-equal types t1 and t2 for which Foo t1 ~ Foo t2. In your example, it's true that Foo Int ~ TypeError "boom" ~ Foo Bool. So indeed there may be a solution to the constraint Foo t1 ~ Foo t2 that does not require t1 ~ t2. But if the proof goes via TypeError, as here, perhaps that particular sort of incompleteness doesn't matter. So it sounds plausible. I don't really know how to formalise it though.
https://trac.haskell.org/trac/ghc/ticket/12119
CC-MAIN-2020-05
refinedweb
1,180
61.16
As Blaine Wastell posted, we are about to release the patterns & practices guidance for building Windows Store business apps. You may be thinking… “What is a business app?” and “How is a business app different than any other app?”. Well the simple answer is that a “business” app is expected to be well architected, maintainable, testable, and have high quality. Business apps may differ from simple content consumption apps in many ways. Business apps are more likely to need data validation. There is probably more state data that needs to survive suspension and termination. Also, it is more likely that business apps will be developed by teams of developers that want a decoupled architecture and a codebase that is maintainable and testable. Basically, if you are interested in improving the quality and maintainability of your Windows Store apps, I recommend taking a look at this guidance. You can get the latest source on CodePlex. This post is part 1 of a series that walks you through how to use the Prism for Windows Runtime library to create a Windows Store app. The Prism library will not only make your apps more testable and maintainable, I believe it will also make it easier to create Windows Store apps. Creating a Hello World app using Prism for Windows Runtime - Create a new C#-based Windows Store app named HelloWorld using the Blank App (XAML) Visual Studio project template. - Add a reference to the Microsoft.Practices.Prism.StoreApps library. - Add a folder named “Views” to the root folder of the Windows Store app project. This app will use the FrameNavigationService class, provided by the Microsoft.Practices.Prism.StoreApps library, to navigate. The FrameNavigationService class's default convention is to locate pages in the "Views" namespace of the Windows Store app project. When the Navigate method of the FrameNavigationService class is called with a page token parameter, "Page" is appended to the page token. 
For example, if the Navigate method was called with "Customer" as the page token, the FrameNavigationService would look for a page named "CustomerPage" in the "Views" namespace. - Add a new Blank Page named “StartPage” to the Views folder. - Add a TextBlock control to the page's Grid control. <TextBlock Text="Hello World!!!" Style="{StaticResource HeaderTextStyle}" /> - Delete all of the contents of the App.xaml.cs file. This boilerplate functionality is now provided by the MvvmAppBase class. - Add the following code to the App.xaml.cs file. using Microsoft.Practices.Prism.StoreApps; sealed partial class App : MvvmAppBase { public App() { this.InitializeComponent(); } } - Update the App.xaml file so that the App class derives from the MvvmAppBase class rather than the Application class. <Infrastructure:MvvmAppBase xmlns: ... </Infrastructure:MvvmAppBase> - Add code to the App class to navigate to the StartPage. The MvvmAppBase class provides several virtual methods that you can override to customize its behavior. However, MvvmAppBase provides the OnLaunchApplication abstract method for which you must provide an override. This method specifies what your app will do when it's launched. protected override void OnLaunchApplication(LaunchActivatedEventArgs args) { NavigationService.Navigate("Start", null); } The MvvmAppBase has a NavigationService property that exposes the FrameNavigationService. - Run the app. When the app runs you will see the StartPage with the “Hello World!!!” TextBlock. Next let’s take a look at how to associate a view model to the StartPage view in my next post: Creating a basic implementation of the MVVM pattern. Thanks for such a great framework. I agree on that if we interested in improving the quality and maintainability of your Windows Store apps, we should follow this guidance. Could you put this series articles into Microsoft Virtual Academy? Thank you for your hard work. @Kyle: Thanks for the suggestion. I'll look into MVA. 
May need to record some videos at least. Hi Francis, do you have the code for this Hello World that is current. I am following the steps above but getting a number of errors I would appreciate some help with.
https://blogs.msdn.microsoft.com/francischeung/2013/04/24/prism-for-windows-runtime/
CC-MAIN-2018-26
refinedweb
658
57.87
This article describes how to solve a logic problem using the AI Search technique. The problem is specified like this. On one side of a river, there are three missionaries.) To solve this problem satisfactorily, your program must explicitly identify all of the optimal (i.e., shortest) solutions to the problem. Well, there are different varieties of search which can all be used, such as breadth first, depth first, or iterative deepening. Each of these different search methods has different properties such as whether a result is guaranteed, and how much time and space is needed to carry out a search. This article uses breadth first search, as this search method is guaranteed to find a solution state. Search is generally concerned with looking for a solution, or solutions in a search space of individual search states. Typically, a search space is a list or some sort of collection which contains each of the states that is to be checked to see if it is a solution state. A breath first search relies upon the basic search principles mentioned above. What is of specific interest, is how the breadth first search actually goes about looking for a solution state. To illustrate this, consider the following diagram, where each of the ovals is an individual state: It can be seen from the diagram above that there are a number of states, but how did the search choose which ones to look at for a given solution? With a breadth first search, each state at a given layer is expanded before moving to the next layer of states. In the diagram above, it can be seen that the root state was expanded to find the states A1, A2, and A3. There were no more states at this level, so A1 was picked and expanded, which yielded A1.1 -> A1.3. However, A2 and A3 must also be expanded before looking at these new states A1.1 -> A1.3, so A2 and A3 will be expanded next. 
If there are no more states at the current level to expand, the first node from the next level is expanded, this carries on until a solution (or solutions) is found. This is the basis of a breadth first search. The following algorithm illustrates how a breadth first search should function when looking for a single solution. This is the basis of the algorithm. But there are some considerations, which are fairly important to any AI search technique. These are explained below. This is a fundamental part of any search algorithm, and is really the key to having a successful search algorithm. For the Missionaries and Cannibals problem, we could consider the following diagram. From the figure above, we can see that several of the states are invalid, so have been pruned (discarded) from the search tree. This means these states will not have successor states generated. For the Missionaries and Cannibals problem, this is simply having all three missionaries and all three cannibals on the opposite side of the river. The demo project attached actually contains a Visual Studio 2005 solution, with the following three classes: Is the main entry point into the CannMissApp application. Essentially, all this class does is call the getSolutionStates method of the SolutionProvider, and then print the solutions (if there are any): getSolutionStates SolutionProvider ..... ..... SolutionProvider S = new SolutionProvider(); ArrayList Solution = S.getSolutionStates(new State("Root", 3, 3, false,1), new State("Goal", 0, 0, true,999999)); printSolutions(Solution); .... .... //This method prints the Solutions returned //by the SolutionProvider object. //However, there may not actually be any solutions to print. // //Once a SolutionProvider is created this //class asks it to provide all the //solutions. If there are any solutions //returned these are then printed // //param : Solution the Solutions returned // by the SolutionProvider object. 
private static void printSolutions(ArrayList Solution) { //Are there any solutions if (Solution.Count == 0) { Console.WriteLine("\n\nNO SOLUTIONS HAVE BEEN FOUND\r\n"); } else { int Solfound = 1; //show the Solutions for (int i = 0; i < Solution.Count; i++) { State s = (State)Solution[i]; Console.WriteLine("=====FOUND SOLUTION [" + Solfound++ + "]=====\r\n"); Console.WriteLine("This solution was found at level [" + s.getStateLevel() + "]\r\n"); s.Print(); Console.WriteLine("\r\n"); } } } Provides solutions to the "Cannibals and Missionaries" search problem: using System; using System.Collections.Generic; using System.Collections; using System.Text; namespace MissCanApp { #region SolutionProvider CLASS //SolutionProvider - Provides solutions //to the "Cannibals and Missionaries" //Search problem. // //HOW THE SEARCH IS PERFORMED // //This class uses a breadth 1st search //to search for possible solutions. //A ArrayList is used to store the agenda. //The agenda consists of a list of State //classes. The root State will be the 1st //item in the agenda. When the root //is taken from the agenda, it is compared //to the goal state, if it is the goal //state it will be stored within a //SolutionsFound ArrayList. However the 1st node //in the search agenda it not likely to //be the goal so different successor //states will be generated from the root //and these will be added to the agenda. //Each of these states will then be taken //from the agenda and compared to the //goal, if they are equal to the //goal they will be stored within a //SolutionsFound ArrayList. However if they //are not the goal state, they too will //be expanded to create successor states, //which will then be added to the agenda. // //The solutions may be found at various //levels within the search tree. This application //should return the optimal solustions. //To achieve this the 1st solution has its level //within the search tree recorded. 
Then when new //solutions are found they are compared //to this level, if they are less than or the //same they too are stored as valid optimal //solutions. Only when the solutions found are //found at a higher level does the search know //that it has found all the optimal solutions. //When this occurs the search is ended, and the //optimal solutions returned. class SolutionProvider { // Instance fields private int CURRENT_ROOT_STATE = 0; private ArrayList searchAgenda = new ArrayList(); //SolutionProvider Constructor //Simply creates a new SolutionProvider object public SolutionProvider() { } //Creats a new State based on //a parent state. The formal parameters //supplied dictate what changes are //to be applied to the parent //state to form the new child state // //@param : StateName represents the new state name // //param : parent is the parent State //that this State should be generated from. //Ie the new State will be a child of this parameter // //param : nMiss number of Missionaries //in the boat for the new state // //param : nCan number of Cannibals in the boat for the new state private void addStateToAgenda(String StateName,State parent, int nMiss, int nCan) { // BoatDirection holds either 1 or -1, depending on the side. int BoatDirection = parent.Side ? 1 : -1; //Get the name of the parent state and add append the new state //suffix to it String newStateName = parent.getStateName() + StateName; //Create a new state based on the parent state parameter that was //supplied when calling this method State newState = new State(newStateName, parent.nMiss + nMiss * BoatDirection, parent.nCan + nCan * BoatDirection, !parent.Side, parent, parent.getStateLevel() + 1); //Try and add the newly generated State to the search agenda addStateToAgenda(newState); } //Tries to add the NewState parameter provided to the search agenda. //If the state parameter provided is //not a valid state, the state will not //be added to the agenda. 
The State //class deals with checking for a ValidState // //param : NewState the state to try and add it to the search agenda private void addStateToAgenda(State newState) { // Dont allow invalid states to be added to search agenda if (newState.InvalidState()) return; //Valid state so add it to the agenda searchAgenda.Add(newState); } //This is the main method that does most of the work. It carries out //various tasks. These are described below // //The 1st thing that is done is the //internal SolutionsFound collection is //initialized and the StartState //(From the formal parameter) is added to the //Search Agenda // //Then a loop is enetered that loops through //the entire agenda taking off //the 0 element when 1st entering the loop. //For readability I have defined //the 0 elemenet to be an int called //CURRENT_ROOT_STATE. This variable is //a simply int that is set to 0 when //the SolutionProvider class is constructed. // //When the CURRENT_ROOT_STATE element //is removed from the Search Agenda it is //cast to a State then compared to //the GoalState (From the formal parameter). //If it equals the goal state (Dealt //with by the State class) and is the 1st //solution found the level of the state //is recorded, and the state is stored //within the SolutionsFound collection. //If this is not the 1st solution found //the state level of this new solution //is compared to the recorded state level //of the 1st solution. If this solution //is less than or equal to the recorded //optimal level, then it too may be added //to the SolutionsFound collection. //However if it is not the goal state, //which is will not be for the 1st State //(ROOT) then new succeessor nodes will //need to be created based upon this parent //node. The generateSucessors method deals with this. 
// //param : StartState the StartState, with all Cannibals/Missionaries //on one side of the river // //param : EndState the StartState, with all Cannibals/Missionaries //on the opposite side of the river // //return : ArrayList that holds all the solutions found public ArrayList getSolutionStates(State StartState, State EndState) { //initialise local fields int optimalSolfoundAtLevel = 0; bool allOptimalSolutionsFound = false; bool foundFirstSolution = false; //Initialise SolutionsFound collection ArrayList Solutions = new ArrayList(); //Add StartState to the Search Agenda addStateToAgenda(StartState); //Loop through search agenda until //we have found all the optimal solutions while (searchAgenda.Count > 0 && !allOptimalSolutionsFound) { //Get the current root state from the Search Agenda State CurState = (State)searchAgenda[CURRENT_ROOT_STATE]; //Remove the current root state from the Search Agenda, is has been //dealt with now searchAgenda.RemoveAt(CURRENT_ROOT_STATE); //Is the current root state the Goal State if (CurState.Equals(EndState)) { //Has the 1st solution been found if (foundFirstSolution) { //YES the 1st solution was found so lets //compare this states level to the existing level //from when the 1st solution was found if (CurState.getStateLevel() <= optimalSolfoundAtLevel) { //If it is, store the state in //the SolutionsFound collection Solutions.Add(CurState); } else { //since the current state level is //greater than the optimalSolfoundAtLevel //this solution must be more costly, //we must have already found all the optimal solutions. //so need to break out of loop, so set break condition allOptimalSolutionsFound =true; } } else { //At this point this must be the 1st solution //found, so store it and record its level //in the optimalSolfoundAtLevel, so that this //can be used to compare against //for the next solutions found. //Also prevent this logic froming running //again by setting the foundFirstSolution to true. 
foundFirstSolution=true; optimalSolfoundAtLevel = CurState.getStateLevel(); Solutions.Add(CurState); } } else { //The current root state is NOT Goal State, so create //sucessor states based on it generateSucessors(CurState); } } return Solutions; } //This method simply calls the addStateToAgenda method //passing in all required derivations of the CurState state //to be new sucessor nodes // //param : CurState the current state //to use to create sucessor nodes from private void generateSucessors(State CurState) { //if this method has been called the CurState, was NOT the goal, //so need to create new sucessor //states based on it. So try and create //some new states. int nCan, nMiss =0; int stateName =1; //loop through all possible combinations for (int i = 0; i <= Program.boatsize; i++) { for (int j = 0; j <= Program.boatsize; j++) { //prune the search tree, getting rid of invalid states if (i==0 && j ==0) continue; if (i + j > Program.boatsize) break; //good state found, so add to agenda nMiss = i; nCan = j; addStateToAgenda("_" + stateName++, CurState, nMiss, nCan); } } } } //End of SolutionProvider class #endregion } Is a representation of a node with the Search Agenda. Each State holds various bits of data which help to model a specific state, such as number of missionaries, number of cannibals, the side the boat is on etc. State using System; using System.Collections.Generic; using System.Text; namespace MissCanApp { #region State CLASS // State - Is a representation of a node //with the Search Agenda. Each State // holds various bits of data which help to model a specific state. // These data elements are described below. // // Number of Missionaries within the current state // // Number of Cannibals within the current state // // Side that the boat is on. False is one side, True is the other side // // Name of the State. 
This will be expanded //upon for each successor state // so that a full StateName can be printed, to show the full search path // that this state used to get to where it is now // // PrevState is the previous state, this is the parent state from which this // state was created. This will be null for the ROOT state, as it does not // have a previous state // State Level is the level that this states in on in the search tree class State { // Instance fields public int nMiss, nCan; public bool Side; private int NumOfEachAtStart = 3; private String Name; private State PrevState; private int stateLevel = 0; //State Constructer (1), Use this for the root state //Simply creates a new State with the name, number of Missionaries, //number of Cannibals and side to match the values supplied by //the formal parameters. In this case there will be no PrevState as this //is the 1st state // //param : Name is the name for this State //param : nMiss the number on Missionaries for this state //param : nCan the number on Cannibals for this state //param : Side the side of the river that the boat is now on //param : stateLevel the level this state is on, // 0=root / 1st layer, 1 = 2nd layer, 2 = 3rd layer public State(String Name, int nMiss, int nCan, bool Side, int stateLevel) : this(Name, nMiss, nCan, Side, null, stateLevel) { //Call the overloaded constructor with the formal parameters //provided, but make PrevState=null, as the 1st State does not //have a PrevState to point to //this(Name, nMiss, nCan, Side, null, stateLevel); } //State Constructer (2), Use this to //create States based upon other States //Simply creates a new State with the name, number of Missionaries, //number of Cannibals,side and PrevState to match the values supplied by //the formal parameters. 
In this case PrevState will be a pointer to this //nodes parent node // //param : Name is the name for this State // //param : nMiss the number on Missionaries for this state // //param : nCan the number on Cannibals for this state // //param : Side the side of the river that the boat is now on // //param : PrevState a pointer to this State's PrevState (parent) // //param stateLevel the level this state is on, // 0=root / 1st layer, 1 = 2nd layer, 2 = 3rd layer public State(String Name, int nMiss, int nCan, bool Side, State PrevState, int stateLevel) { //Assign parameters to local instance fields this.Name = Name; this.nMiss = nMiss; this.nCan = nCan; this.Side = Side; this.PrevState = PrevState; this.stateLevel = stateLevel; } //Simply returns this States stateLevel // //return : int representing this States stateLevel public int getStateLevel() { return this.stateLevel; } //Simply returns this States name // //return : String representing this States name public String getStateName() { return this.Name; } //Prints a full search path of how this state came to be at the //goal state. Makes use of the PrevState to recursively call //the Print method until there is no PrevState. This way each //State only prints its own data public void Print() { //Check that there is a PrevState, Root node will not have one, so //that is when all states from Goal - to start have been printed if (PrevState != null) { //Use recursion to allow Previous //state to print its own data paths PrevState.Print(); } //Use the conditional operator to figure out what side we are on String WhatSide = Side ? " BOAT RIGHT->" : "<-BOAT LEFT "; //Print the current state. 
Console.WriteLine(nMiss + "M/" + nCan + "C " + WhatSide + " " + (NumOfEachAtStart - nMiss) + "M/" + (NumOfEachAtStart - nCan) + "C"); } //Simply returns true is 2 states are the same // //param : StateToCheck is the State to check against // //return : True if the number of Missionaries, //number of Cannibals and //Side are the same for this State //and the StateToCheck against. Otherwise //false is returned public bool Equals(State StateToCheck) { return (nMiss == StateToCheck.nMiss && nCan == StateToCheck.nCan && Side == StateToCheck.Side); } //Simply returns true if this State is a valid state //This method makes use of the command line argument that //specfies whether there should be more //Cannibals than Missionaries, //OR whether there should be more //Missionaries than Cannibals. Either //way it uses this global flag to work //out if the state is valid for the //given choice that the user made //when running this application. // //return : True only if the number //of PersonType1 does not outnumber //the PersonType2 in this state. //The allocation of whom PersonType1 and //PersonType2 are, is governed by //the command line argument to this //application. public bool InvalidState() { int PersonType1 = 0; int PersonType2 = 0; //Check to see if the user requested //that there be more Cannibals than //Missionaries. 
If this is the case set //PersonType variables for this //situation if (Program.CanOutnumberMiss) { PersonType1 = nCan; PersonType2 = nMiss; } //Otherwise set the siutation to be that //there be more Missionaries than //Cannibals else { PersonType1 = nMiss; PersonType2 = nCan; } // Check for < 0, which could actually // happen unless it is checked for here if (nMiss < 0 || nCan < 0 || nMiss > NumOfEachAtStart || nCan > NumOfEachAtStart) return true; //Do PersonType2 outnumbers //PersonType1(only worry when there is at least //one PersonType1) one Side1 if (PersonType1 < PersonType2 && PersonType1 > 0) return true; //Do PersonType2 outnumbers PersonType1 //(only worry when there is at least //one PersonType1) one Side2 if ( (NumOfEachAtStart - PersonType1 < NumOfEachAtStart - PersonType2) && (NumOfEachAtStart - PersonType1 > 0)) return true; //At this point the State must be valid return false; } } //End of State class #endregion } When run, the application will print all valid solutions that were found. It will find more than one. The results should look something like the following: I hope this article is of interest to someone. ... 2M/2C <- BOAT LEFT 1M/1C 0M/2C BOAT RIGHT -> 3M/1C 0M/3C <- BOAT LEFT 3M/0C ... General News Suggestion Question Bug Answer Joke Praise Rant Admin Use Ctrl+Left/Right to switch messages, Ctrl+Up/Down to switch threads, Ctrl+Shift+Left/Right to switch pages.
https://www.codeproject.com/Articles/16234/AI-Search-to-Solve-the-Missionaries-and-Cannibals
CC-MAIN-2018-39
refinedweb
3,035
58.92
Python has an extremely rich and healthy ecosystem of data science tools. Unfortunately, to outsiders this ecosystem can look like a jungle (cue snake joke). In this blog post I will provide a step-by-step guide to venturing into this PyData jungle. What's wrong with the many lists of PyData packages out there already you might ask? I think that providing too many options can easily overwhelm someone who is just getting started. So instead, I will keep a very narrow scope and focus on the 10% of tools that allow you to do 90% of the work. After you mastered these essentials you can browse the long lists of PyData packages to decide which to try next. The upside is that the few tools I will introduce already allow you to do most things a data scientist does in his day-to-day (i.e. data i/o, data munging, and data analysis). Installation¶ It has happened quite a few times that people came up to me and said "I heard Python is amazing for data science so I wanted to start learning it but spent two days installing Python and all the other modules!". It's quite reasonable to think that you have to install Python if you want to use it but indeed, installing the full PyData stack manually when you don't know which tools you actually need is quite an undertaking. So I strongly recommend against doing that. Fortunately for you, the fine folks at Continuum have created the Anaconda Python distribution that installs most of the PyData stack for you, and the modules it doesn't provide out of the box can easily be installed via a GUI. The distribution is also available for all major platforms so save yourself the two days and just use that! IPython Notebook¶ After Python is installed, most people start by launching it. Again, very reasonable but unfortunately dead wrong. I don't know a single SciPythonista that uses the Python command shell directly (YMMV). 
Instead, IPython, and specifically the IPython Notebook are incredibly powerful Python shells that are used ubiquitously in PyData. I strongly recommend you directly start using the IPython Notebook (IPyNB) and don't bother with anything else, you won't regret it. In brief, the IPyNB is a Python shell that you access via your web browser. It allows you to mix code, text, and graphics (even interactive ones). This blog post was written in an IPyNB and it's rare to go find a talk at a Python conference that does not use the IPython Notebook. It comes preinstalled by Anaconda so you can just start using it. Here's an example of what it looks like: print('Hello World') Hello World This thing is a rocket -- every time I hear one of the core devs talk at a conference I am flabbergasted by all the new things they cooked up. To get an idea for some of the advanced capabilities, check out this short tutorial on IPython widgets. These allow you to attach sliders to control a plot interactively: from IPython.display import YouTubeVideo YouTubeVideo('wxVx54ax47s') # Yes, it can also embed youtube videos. Pandas¶ Normally, people recommend you start by learning NumPy (pronounced num-pie, not num-pee!) which is the library that provides multi-dimensional arrays. Certainly this was the way to go a few years ago but I hardly use NumPy at all today. The reason is that NumPy became more of a core library that's used by other libraries which provide a much nicer interface. Thus, the main library to use for working with data is Pandas. It can input and output data from all kinds of formats (including databases), do joins and other SQL-like functions for shaping the data, handle missing values with ease, support time series, has basic plotting capabilities and basic statistical functionality and much more. There is certainly a learning curve to all its features but I strongly suggest you go through most of the documentation as a first step. 
Trust me, the time you invest will be set off a thousand fold by being more efficient in your data munging. Here are a few quick tricks to whet your appetite: import pandas as pd df = pd.DataFrame({ 'A' : 1., 'B' : pd.Timestamp('20130102'), 'C' : pd.Series(1, index=list(range(4)), dtype='float32'), 'D' : pd.Series([1, 2, 1, 2], dtype='int32'), 'E' : pd.Categorical(["test", "train", "test", "train"]), 'F' : 'foo' }) df Columns can be accessed by name: df.B 0 2013-01-02 1 2013-01-02 2 2013-01-02 3 2013-01-02 Name: B, dtype: datetime64[ns] Compute the sum of D for each category in E: df.groupby('E').sum().D E test 2 train 4 Name: D, dtype: int32 Doing this is in NumPy (or *gasp* Matlab!) would be much more clunky. There's a ton more. If you're not convinced, check out 10 minutes to pandas where I borrowed this from. Seaborn¶ The main plotting library of Python is Matplotlib. However, I don't recommend using it directly for the same reason I don't recommend spending time learning NumPy initially. While Matplotlib is very powerful, it is its own jungle and requires lots of tweaking to make your plots look shiny. So instead, I recommend to start using Seaborn. Seaborn essentially treats Matplotlib as a core library (just like Pandas does with NumPy). I will briefly illustrate the main benefits of seaborn. Specifically, it: - creates aesthetically pleasing plots by default (for one thing, it does not default to the jet colormap), - creates statistically meaningful plots, and - understands the pandas DataFrameso the two work well together. While pandas comes prepackaged with anaconda, seaborn is not directly included but can easily be installed with conda install seaborn. 
%matplotlib inline # IPython magic to create plots within cells import seaborn as sns # Load one of the data sets that come with seaborn tips = sns.load_dataset("tips") sns.jointplot("total_bill", "tip", tips, kind='reg'); As you can see, with just one line we create a pretty complex statistical plot including the best fitting linear regression line along with confidence intervals, marginals and the correlation coefficients. Recreating this plot in matplotlib would take quite a bit of (ugly) code, including calls to scipy to run the linear regression and manually applying the linear regression formula to draw the line (and I don't even know how to do the marginal plots and confidence intervals from the top of my head). This and the next example are taken from the tutorial on quantitative linear models. Works well with Pandas DataFrame¶ Data has structure. Often, there are different groups or categories we are interested in ( pandas' groupby functionality is amazing in this case). For example, the tips data set looks like this: tips.head() We might ask if smokers tip differently than non-smokers. Without seaborn, this would require a pandas groupby together with the complex code for plotting a linear regression. With seaborn, we can provide the column name we wish to split by as a keyword argument to col: sns.lmplot("total_bill", "tip", tips, col="smoker"); Pretty neat, eh? As you dive deeper you might want to control certain details of these plots at a more fine grained level. Because seaborn is just calling into matplotlib you probably will want to start learning this library at that point. For most things I'm pretty happy with what seaborn provides, however. Conclusions¶ The idea of this blog post was to provide a very select number of packages which maximize your efficiency when starting with data science in Python. Further reading¶ - Curated decibans of scientific programming resources in Python -- very comprehensive list of Python modules. 
- Python for Data Analysis -- the book by Wes McKinney (Pandas' creator). - The Open Source Data Science Masters -- many useful links to various resources. Acknowledgements¶ Thanks to Katie Green and Andreas Dietzel for helpful feedback on an earlier draft.
http://twiecki.github.io/blog/2014/11/18/python-for-data-science/
CC-MAIN-2018-09
refinedweb
1,334
62.27
Hello all, My config is :- Forms [32 Bit] Version 10.1.2.0.2 (Production) Oracle Database 11g Release 11.2.0.1.0 - 64bit Production I can easily run my on Windows and Linux platform untill i am not use java bean in my form. After using the getMacaddress JAVA BEAN. I am not able to run my form on linux platform. If i remove the java bean from my form than form will run on linux. I am not familiar with linux so i am not able to trace the problem basicaly i don't know which version of jre is used by linux system and i don't know how to find it. Please guide me. I don't know what the getMacaddress Java Bean does, but did you take a look at the source if you have it? Maybe it runs some platform specific stuff? If it calls e.g. ipconfig this wouldn't work under Linux. cheers The jre is a browser plugin so the version is found the same way as it is on windows more or less, by asking the browser. Suppose they had firefox or chrome, it would be about:plugins. If it's some other browser like nautilus etc then you would need to research that browser to find out how to tell the plugin version. Plan b query it from the applet. This web site has very good info about the jre plugin detection: (Fascinating what they say about deployment toolkit (a plugin in the browser). I have had the biggest problems ever with that software. Once it gets installed there is no way removing it. I'd highly recommend NEVER allowing the deployment toolkit to get installed in any browser or you can spend many hours trying to remove it. No one knows what it does but I can tell you that it is highly capable of stopping the jre of ever being called successfully. The worst ever problem is the user having installed the jre 7 with the deployment toolkit and then you are trying to get rid of 7 and go back to 6. With some versions of the jre 7 it was nearly utterly impossible to get rid of it. No software program should be deployment that is unremovable! 
) I can create possibly the same type of problem on windows with my bean (that never works. Way to go that you got one to work ever!) that it causes some kind of loop or hanging in the jre and then it is toast afterward. Java has to be killed on the client in that case which takes expertise with the operating system in question. Beans are hard to do. I know Francois works very hard to help the galaxy with beans. Thanks Francois! But we could use more resources so more customers could make more forms enhancements via the bean (or javascript or html5 etc etc) route. Thanks for your reply, My Java code is [PRE] import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.net.InetAddress; import java.net.UnknownHostException; import oracle.forms.ui.*; import oracle.forms.properties.*; public class getMacAddress extends VBean { public getMacAddress() { System.out.println("Bean has been initialised"); } /* public static void main(String[] args) { String res = getAddress(); System.out.println(res); } */ public String getAddress() { String macAddress = null; String getAddress = null; Process p = null; BufferedReader in = null; try { String osname = System.getProperty("os.name"); if (osname.startsWith("Windows")) { p = Runtime.getRuntime().exec( new String[] { "ipconfig", "/all" }, null); } // Solaris code must appear before the generic code else if (osname.startsWith("Solaris") || osname.startsWith("SunOS")) { String hostName = getFirstLineOfCommand(new String[] { "uname", "-n" }); if (hostName != null) { p = Runtime.getRuntime().exec( new String[] { "/usr/sbin/arp", hostName }, null); } } else if (new File("/usr/sbin/lanscan").exists()) { p = Runtime.getRuntime().exec( new String[] { "/usr/sbin/lanscan" }, null); } else if (new File("/sbin/ifconfig").exists()) { p = Runtime.getRuntime().exec( new String[] { "/sbin/ifconfig", "-a" }, null); } if (p != null) { in = new BufferedReader(new InputStreamReader( 
p.getInputStream()), 128); String l = null; while ((l = in.readLine()) != null) { macAddress = parse(l); if (macAddress != null && parseShort(macAddress) != 0xff) break; } } } catch(IOException e) { e.printStackTrace(); } catch(Exception e) { e.printStackTrace(); } return macAddress; } public String parse(String in) { int hexStart = in.indexOf("0x"); if (hexStart != -1 && in.indexOf("ETHER") != -1) { int hexEnd = in.indexOf(' ', hexStart); if (hexEnd > hexStart + 2) { return in.substring(hexStart, hexEnd); } } int octets = 0; int lastIndex, old, end; lastIndex = in.lastIndexOf('-'); if (lastIndex > in.length() - 2) return null; end = Math.min(in.length(), lastIndex + 3); ++octets; old = lastIndex; while (octets != 5 && lastIndex != -1 && lastIndex > 1) { lastIndex = in.lastIndexOf('-', --lastIndex); if (old - lastIndex == 3 || old - lastIndex == 2) { ++octets; old = lastIndex; } } if (octets == 5 && lastIndex > 1) { return in.substring(lastIndex - 2, end).trim(); } return null; } public short parseShort(String s) throws NullPointerException { s = s.toLowerCase(); short out = 0; byte shifts = 0; char c; for (int i = 0; i < s.length() && shifts < 4; i++) { c = s.charAt(i); if ((c > 47) && (c < 58)) { out <<= 4; ++shifts; out |= c - 48; } else if ((c > 96) && (c < 103)) { ++shifts; out <<= 4; out |= c - 87; } } return out; } public String getFirstLineOfCommand(String[] commands) throws IOException { Process p = null; BufferedReader reader = null; try { p = Runtime.getRuntime().exec(commands); reader = new BufferedReader(new InputStreamReader( p.getInputStream()), 128); return reader.readLine(); } finally { if (p != null) { if (reader != null) { try { reader.close(); } catch (IOException ex) {} } try { p.getErrorStream().close(); } catch (IOException ex) {} try { p.getOutputStream().close(); } catch (IOException ex) {} p.destroy(); } } } } [/PRE] This code handle the both Linux and Windows. 
Basically i faced the problem in linux platform, i don't know how to check java version in Linux and how to install the new java version. Since you are using Forms 10.1.2 and JInit isn't an option anymore considering it doesn't work with nowadays browsers I suppose using JPI >= 1.6.x. is feasible if you are not already using it. So instead you could use NetworkInterface.getHardwareAdress: you could get a list of all Interfaces available too: NetworkInterface (Java Platform SE 6) cheers i don't know how to check java version in Linux and how to install the new java version. java -version Maybe install a newer version if needed. Then this: sounds like a good start. cheers As per my knowledge oracle jdeveloper 10 g support the JPI >= 1.5.x. and NetworkInterface.getHardwareAdress is supported by the JPI >= 1.6.x. Sir, As per your suggestion i am working on your given link. JDeveloper isn't tied to a specific JDK Version; you'd simply have to install a newer version and add it under Tools => Manage Libraries => J2SE Definitions. cheers After doing the lots of afford i have done it. This link is really helpful. [code] Maybe install a newer version if needed. Then this: [/code] Sir now i am able to run the form on linux system. Kindly guide me how to update the JDK version of JDeveloper. Please provide the steps. Still i am not able to get the Linux system Mac Id. After update the JDK may be i able to use NetworkInterface.getHardwareAdress. As said in my previous post: download a JDK (e.g. from here: Java Archive Downloads - Java SE 6) and install it. Then add it under Tools => Manage Libraries => J2SE Definitions. cheers Now i install the jdk 1.6.0_20 and add into the jdeveloper 10.1.3. 
and i change my code for get mac id is - package demo; import java.net.InetAddress; import java.net.NetworkInterface; import java.net.SocketException; import java.net.UnknownHostException; public class App{ public static void main(String[] args){ InetAddress ip; try { ip = InetAddress.getLocalHost(); System.out.println("Current IP address : " + ip.getHostAddress()); NetworkInterface network = NetworkInterface.getByInetAddress(ip); byte[] mac = network.getHardwareAddress(); System.out.print("Current MAC address : "); StringBuilder sb = new StringBuilder(); for (int i = 0; i < mac.length; i++) { sb.append(String.format("%02X%s", mac[i], (i < mac.length - 1) ? "-" : "")); } System.out.println(sb.toString()); } catch (UnknownHostException e) { e.printStackTrace(); } catch (SocketException e){ e.printStackTrace(); } } } in form i create one jave bean area and add the implement class is demo.get_info. and on B1 (button) when-button-pressed :T1 :=GET_CUSTOM_PROPERTY('BEAN_MAC',1,'get_info'); No error comes and no out put comes. So please guide me how to use the getHardwareAddress to get mac id.
https://community.oracle.com/message/11109024
CC-MAIN-2014-15
refinedweb
1,394
61.22
It's the continuing balance between ease-of-use and generality. At a literal level, you can't use the same tree items in multiple places in the tree, because then various properties would become ambiguous, properties like TVGN_PARENT or TVIS_EXPANDED. (If a tree could be in two places, then it would have two parents, for example.) Of course, this problem could have been solved by separating the item content from the item presence. So instead of just having an HTREEITEM, there would be, say, HTREENODE and HTREENODECONTENTS. The node would represent a physical location in the tree, and the item contents would represent the contents of that node: its name, icon, etc. Sure, that could have been done, but remember the balance. You're making the common case hard in order to benefit the rare case. Now everybody who is manipulating treeviews has to worry about twice as many objects (what used to be one item is now a node plus contents). This is generally not the balance you want to strike when designing an interface. When you design an interface, you want to make the common case easier than the rare case. A program that wants this separation can, of course, do the separation manually. Put all the contents in a separate shareable structure and have your HTREEITEMs refer to that shared structure in their lParams. This is more work for the program, but now the cost is being shouldered by the one who wants the extra functionality. Other than the lack of multiselect, I have to say the tree control is one of the most straightforward of the common controls. I’ve never done anything serious in windows GUI programming, but is this related? Yesterday I just discovered something. Do not name a folder on your Windows Desktop as "desktop". Why? Because it leads to some crazy behavior in a special case. Here are the steps. It happens on Windows 2000 Professional and Server, and Windows XP Home/Professional at least. – Open explorer in the folders view. (Press the Win-E combo.) 
– Right click in the list pane and create a new folder. Rename it to "desktop". – Right click in the list pane and create another new folder. – "Go in" the last created folder. – Create another new folder or a new file here using the same old context menu. – Press F5 (once or twice). In the Folder tree view on left you’ll see empty entries. The only use of this explorer window now is to create more such empty entries using refresh-view (F5). Clicking on "C:" for example leads you to no where. Try it. The special case is not very special. Later whenever you create a new file/directory and refresh, you’ll see it. Sometimes a dialog pops up saying "c:…..New Folder" etc.. refers to a location that is not available. Parts of TreeView make the common case hard to benefit the rare case though – which is kind of odd. Namely, there are two images for each TreeView item – one for when the item is selected, and one for when it’s not. I’ve never seen this used by anyone. However, the common case used throughout the OS and apps is that you want a different image when the node is opened than when it’s closed. It’s a really strange design choice to have it the way it’s actually implemented. Of course, these days you’d probably want all the permutations of open/closed/selected/unselected/hot/cold… Chetan: It’s not related. The special name "desktop" (lousy choice – should have been a GUID for the filename) is used by the shell, that’s all. Simon, Think of the case when a tree view is driving a more detailed view pane, also on-screen. In this case, it makes sense for the selection to change the icon. For example, if the "folder" in the tree view is on display in the list pane, as in Explorer, it makes sense for that folder to appear "open". On the other hand, I’d expect the open/close status of a tree view to be indicated by the expand/collapse widget showing either + or -, and also by the fact that there are items displayed beneath a given item! 
I’d say a different icon for selected is the more useful case. Wouldn’t it have been possible to add an additional struct containing the content information and an additional method that would allow you to populate the node with the struct so that the creation of nodes would be simpler. I.e. you could have gone with both approaches. Orion Adrian Once you allow a tree node to be in several different places of the tree, it is no longer a tree. Now try to explain Joe Random User what a DAG is and why it works :) Johan: Never mind – I had a brain fart and confused the two models. I was under the impression that Explorer used the common-case I was describing – it doesn’t. For comparison, GtkTreeView is a concrete example that uses the Model/View/Controller approach. I’d like to know why many treeview (and listview) notification messages send a structure that contains fields that aren’t properly filled in. Eg the lParam field isn’t supplied, or coordinates aren’t correct for custom draw. Things like that. Any reasonably complicated program is surely going to use the lParam field for its own data – wouldn’t the "common case" justify providing this rather than necessitating an additional call to TVM_GETITEM, etc? "When you design an interface, you want to make the common case easier than the rare case." Hear, hear. And I don’t think it’s analogous to model-view. Your model is probably not stored in the tree item — the item is more a placeholder for content, much as buttons or menu labels are. You wouldn’t *really* put your controller code directly in btn1_onClick, would you? Nor would you store information in the button object itself. I think of tree items as containing references to data (or actions). Chetan: You don’t say *where* you’re creating these folders. A few weeks ago I uninstalled some app that actually deleted the physical Desktop folder under <root>:Documents And Settings<myid>. In the Win32 shell there are two concepts of a "desktop". 
This physical folder (which allows you to have a messy desktop with things in it) and the shell namespace root, which is not a real folder. If you delete the physical folder, every time you try to refresh the "screen desktop" you’ll get an error saying that the location refers to something that is unavailable. If you re-create the physical desktop folder then the problem goes away. Jon: Two things: Firstly, you need to check the flags to see which parts of the structure are valid; not all flags are set for each notification message. Secondly, the reason for not using lparam for this is because… well… what do you do if you’re not in the same process as the treeview control? Your lParam data would be invalid. Jon: I just spent fifteen minutes going through all the treeview notifications and all of the ones that refer to items do set the lParam (at least that’s what it looked like to me), so I’m not sure which notifications you’re talking about. If any fail to set it, it’s just an oversight. Simon: Yeah, the selected image thingie violates the principle of "simple is easy". Nobody’s perfect. LVN_DELETEITEM and LVN_GETDISPINFO are two examples where the listview doesn’t supply the lParam value and obviously should. The coordinates supplied for NM_CUSTOMDRAW are sometimes invalid under XP. Ok, these are probably just oversights but they’re annoying nonetheless :)
https://blogs.msdn.microsoft.com/oldnewthing/20040701-00/?p=38613
CC-MAIN-2017-47
refinedweb
1,311
72.56
Opened 12 years ago Closed 12 years ago Last modified 12 years ago #2192 closed defect (fixed) [patch] DateField with unique=True breaks Description The following model breaks, when I try to add an object in the admin. from django.db import models class Foo(models.Model): the_date = models.DateField('Watch me screw up', unique=True) class Admin: pass With the following error: AttributeError at /admin/foo/foo/add/ 'str' object has no attribute 'strftime' Request Method: POST Request URL: Exception Type: AttributeError Exception Value: 'str' object has no attribute 'strftime' Exception Location: /home/anonymous/django/latest/django/db/models/fields/__init__.py in get_db_prep_lookup, line 415 Attachments (2) Change History (9) comment:1 Changed 12 years ago by comment:2 Changed 12 years ago by comment:3 Changed 12 years ago by comment:4 Changed 12 years ago by This only happens with DateField. DateTimeField and TimeField work fine. The cause is the following lines added in r2517 to db.models.fields.DateField.get_db_prep_lookup: elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte', 'ne'): value = value.strftime('%Y-%m-%d') I don't follow the commit log, but it seems odd this would be needed just for DateFields and not the rest. The following is a workaround if the lines need to stay. Just convert back to datetime.date if needed. Patch: --- django/db/models/fields/__init__.py (revision 3194) +++ django/db/models/fields/__init__.py (working copy) @@ -412,6 +412,8 @@ if lookup_type == 'range': value = [str(v) for v in value] elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte', 'ne'): + if type(value) is str: + value = self.to_python(value) value = value.strftime('%Y-%m-%d') else: value = str(value) Changed 12 years ago by Convert string to datetime.date in DateField.get_db_prep_lookup if needed. comment:5 Changed 12 years ago by Change Summary to indicate patch. (Sorry about the first inline patch.) Changed 12 years ago by New version of patch. 
Just check for string first, rather than convert to object, only to go back to string. comment:6 Changed 12 years ago by this patch seemed to do the trick for me After some poking around, it looks like the problem here is that when django.db.models.fields.manipulator_validator_uniquetries to do a lookup to see if an object with the same date exists, it's not turning the string into a datetime.dateobject, just continuing to handle it as a string. I'd imagine that trying this with a DateTimeFieldor TimeFieldwould yield similar problems. So it looks like we need to do some introspection of the field type in there and convert to an appropriate type for doing the lookup.
https://code.djangoproject.com/ticket/2192
CC-MAIN-2018-43
refinedweb
443
57.27
I think you are laboring under a misapprehension. You can only use the XSLT2 Cache functionality through the Java Gateway, originating your request from the cache server. It's s server-side technology. You can use XSLT extensions in Atelier ( or any other tool ) to test and debug your XSLT on filesystem files but you won't be able to use the server callback isc:evaluate() within those stylesheets. This will be supported in the next release of Atelier 1.1 You can have multiple projects in your workspace and thats the way to go. Below is an overview of the synchronization strategy. You will see if that someone changes files on the server underneath you then conflicts will occur. With Atelier you need to focus on the fact that the source on the client is the 'source of truth' the server is where you run your code. Your workflow should be sourcecontrol -> client -> server. Synchronization Services for the New Development Paradigm This document describes the current approach to client/server synchronization. Documents may be edited on the client independently of a connection to a Caché server. However, when a connection is established with a server for the purposes of saving, compiling and running code, then the client and server must agree with respect to the version of documents that are being operated upon. Each document that exists on a server has a hash value associated with it. Each time the document changes, the hash value also changes. The hash value represents the server's version of the document. It's important to keep in mind that there is only ever ONE current version of a document on a server. The hash value is an opaque value, as far as the user is concerned it's just a 'cookie'. All that is necessary when a client pushes a document to the server, is for the client to specify (via the hash) the version of the document it is replacing on the server. 
If the current server hash and the hash passed to the server from the client are equal then there is a MATCH on version and the server can go ahead and SAVE the document. If the hashes don't MATCH then there is a CONFLICT and the document will NOT be saved on the server. * The key idea here is that you cannot successfully push a document from a client to a server unless you identify by version the server document you are replacing. The hashes allow you to do that. * In the case of creating a new file, the client will not know the server hash. In this case it just passes the empty string to the server as the server hash value. If the server does not have that document (that is, it is new to the server too) then the hashes will MATCH, the operation will succeed and the server will return a new document hash. If the server already has the document, then there is a CONFLICT, the operation will not succeed. Conflicts have to be resolved before a document can be modified on the server. How the conflict is actually resolved really does not matter (one could pull the server version, merge the diffs or do whatever). What is important is that when the resolution on the client has been done, the client must update its cached server hash to reflect the current server version. This means that when the client passes that hash value back to the server on the next save, the versions will match and the modification will go ahead. In the case of deleting a file, the client must once again specify the server hash (if it has it). A MATCH on the server will result in the document being deleted, a MISMATCH will result in a CONFLICT. All this is predicated on the client's ability to cache the hashes according to client source file, server address, server namespace and server document. How the data is stored is not important, what is important is that the hashes must be cached and passed to the server when a document modification is requested. 
It's also important that the hash cache is persisted in some sort of client database. If not, then the synchronization of sources would have to be redone each time an interaction with a server is initiated. The relationship between client and server sources is shown in this diagram Server Sync does take some time. This is mitigated for intersystems distributed library databases by pre-populating meta data and placing it on the server. See for example, {serverdir}/atelier/CACHELIB/Metadata.zip. If you have libraries of code then you can generate your own metadata.zip and place it on the server under the appropriate directory. You can call %Atelier.v1.Utils.MetaData.Build(databasename) and then add the generated files to Metadata.zip. We don't do this for you because Caché doesn't have a portable means of creating zip files. For occasions where you don't have this pre-populated metadata, the initial load can take some time. However following the initial sync, synchronization should be quick as we do sophisticated cacheing and reporting of server diffs. The delay can't be completely avoided because large amounts of meta data have to be transferred over the network. This feature has not yet been implemented but planned for the next release. Yes, Atelier requires all files to have a header which defines the metadata for the file. The extension of the file is not enough ( for example in an INT the language mode is important ). Normally you don't need to know the rules as Atelier will handle this for you. For reference here are the rules. I'll ensure that they are added to the Atelier reference documentation It is required that Caché sources be stored in the file system in a textual format as opposed to the XML format that we have been using for some time now. The primary purpose of this requirement is to facilitate easy comprehension, editing and the performance of diffs. 
The current XML format captures additional meta-data ( for example, 'language mode' ) that does not appear in the body of the document. Any textual format MUST be able to accommodate this meta-data to ensure that no information is lost. Therefore, for exporting .mac, .int, .inc, .bas, .mvb and .mvi items during export a single line of header information appears as the first line of text in the following format :- Routine NAME [Type = TYPE, LanguageMode = MODE, Generated] "Routine" is a case insensitive string which identifies this document as being a routine document container NAME is the name of the routine item (case sensitive) Following the name there is an optional collection of keyword value pairs that are enclosed in square brackets '[' ']'. Currently three keywords are supported :- Type = TYPE. Where TYPE is one of bas, inc, int, mac, mvb, mvi. LanguageMode = MODE. Where MODE is an integer that is supported as an argument to $SYSTEM.Process.LanguageMode(MODE). Generated. Its presence indicates that the routine is generated. The keywords are optional and if none is specified then the square brackets containing them should not be present (though it is NOT a syntax error to specify an empty pair of square brackets ([])). The LanguageMode keyword applies only for Type=mac or Type=int and will be ignored for other types. The default value for the LanguageMode keyword is 0. Whitespace is freely supported within the header Everything that comes after the single first line is source content and MUST be formatted according to the established rules of the particular document type. There is no trailer that indicates the end of the document. The first line of the routine has to be in a particular format. Check that you didn't break the formatting. Thanks for your feedback. To some extent we are constrained by the eclipse framework with what we can do but clearly the things that you point out can be improved. 
We will be sure to take this into account in our continued development. Please contact support regarding this Seems atelier is spelt incorrectly in the curl example, here's an example of use curl --user jamien:SYS {"status":{"errors":[],"summary":""},"console":[],"result":{"content":{"version":"Cache for UNIX (Apple Mac OS X for x86-64) 2017.3 (Build 529U) Wed Feb 15 2017 01:28:51 EST","id":"DD2AAF5C-F6C1-11E6-AC71-38C986213273","api":2,"features":[{"name":"DEEPSEE","enabled":true},{"name":"ENSEMBLE","enabled":true},{"name":"HEALTHSHARE","enabled":false}],"namespaces":["%SYS","DOCBOOK","ENSDEMO","ENSEMBLE","SAMPLES","USER"]}}}
https://community.intersystems.com/user/11761/comments?page=2
CC-MAIN-2021-04
refinedweb
1,437
61.56
40-cluster PS3 system models magnetosphere/solar wind interaction: UNH researchers have cobbled together a 40-PS3 cluster so they can solve a climate problem -- the interaction between Earth's magnetic field and the solar wind. Until now, the UNH EOS (Earth, Oceans, and Space) Space Science Center has been running its Open Geospace General Circulation Model (a magneto-hydro-dynamic simulation of the previously mentioned interaction) on a US$750,000 distributed system. Now they can run it on a US$16,000 cluster of PS3s. The heavy investment came in the two-plus months of tweaking the system to accommodate an open-source operating system and in rewriting the simulation program to run on the system. No moving parts: According to MEMS accelerometer chips manufacturer MEMSIC, accelerometer chips becoming standard equipment on consumer devices (enabling volume production) is just one of the reasons MEMS device prices are dropping. The other reason is that new no-moving-parts designs (like MEMSIC's) are getting rid of one of the most expensive components of making MEMS, the moving parts. In fact, according to MEMSIC, the company can run its design on a standard CMOS assembly line. Also, no moving parts means a lower failure rate off the line and a wider range of operating conditions for the product -- there's no moving bits that shock (what acceleromters measure) can damage. An interesting look into the manufacturing process. "Embedded Everywhere" motto for Freescale future: Freescale CTO Lisa Su thinks embedded processors with on-chip sensors will rule the IC business in the future of 2015, redefining the chip industry away from PC-oriented makers towards embedded providers. Already, Su said, "there are about 150 embedded microprocessors around the home ... [plus] another 40 or 50 in your car ... We see that trend accelerating and we predict that there will be over 1,000 embedded devices per person by 2015." 
Su noted that the big changes in embedded processing won't be from the hardware or software but from the way people's lifestyles will change to accommodate the ubiquitous invisible processor. Su also forecast the three most important trends: Workshop coming in July at GA Tech: This two-day workshop (July 10-11, 2008 | agenda) will cover from ray tracing to LANL's Roadrunner, from applications on low-cost Cell/B.E. clusters to computer vision and digital imaging. It will address programmability issues like language and compiler, programming models and common runtime, and ISV programmability framework and tooling. There is no charge to attend; registrants must be registered by June 30, 2008. Please see disclaimer on use of "LANL Roadrunner" name. Picowatt chip sets low-power record: UMich developers think that their 30picowatt Phoenix processor, designed for medical implants and announced at the IEEE Symposium on VLSI Circuits in Honolulu, could be the lowest-power processor yet developed. It is (chip and thin-flim battery) about 1,000 times smaller than the technologies being used for implants today and since it is only in operation part of the time (and consumes 30,000 times less power when in sleep mode), the developers predict it could last up to three years -- by contrast, a watch battery would power the Phoenix for about 263 years. Qosmio G55 laptop goes Cell/B.E. quad core: What's most exciting about the Toshiba Qosmio G55 laptop set to debut in sometime in July 2008? Probably the fact it is a quad-core portable with a Cell/B.E.-based processor and all that entails -- performance and relatively low-power operations. The four processing elements inside the SpursEngine sport a performance level of 48gflops (look out high-def video-stream coding!), a clock frequency of 1.5GHz, and a power envelope of 10/20W (typical mobile processors run at about 35W). See-through wonder straddles the line between solid and other: There's the macro-obvious: Glass is a solid. 
(Toss a rock through it and you see this demonstrated.) Then there's the cool answer: Glass is in a transitive state; our observational lifetimes are too short to notice. So what's the truth? Bizarrely, probably somewhere in the middle. The properties of glass allow it to behave at times like both a solid and a liquid. Glass exists in a "jammed" state of matter between liquid and solid that moves slowly. Atoms in glass try to drip down, but because their routes are blocked by neighbors, it acts somewhat like a solid. Glass is trying to express a crystalline lattice structure like so many solids, but its atoms get stuck in an almost random arrangement -- an icosahedron (like a 3D pentagon). You cannot fill 3D space with icosahedrons (just like you can't tile a floor with pentagonal shapes) so it will not form a lattice. Some scientists think that glass is trending toward the crystalline and that eventually (maybe billions of years) it will get there and become a solid. Some don't. Aside: Normally metals form crystals when they cool; this causes weakness along the crystal boundaries (metal fatigue). When metals are made to cool with icosahedrons (as metallic glasses), they don't crystallize and are not subject to the same fatigues. The new Top500 list is out!: The top ten are: For more coverage on the latest Top500 list see Other ways to parse this list would be, say, the maximum-sustained-tflops-per-core: the peak-tflops-per-core: the kW-per-core: the maximum-sustained-tflops-per-kW: the peak-tflops-per-kW: Notice that LANL's Roadrunner shows up in the top five no matter how you slice the data. Please see disclaimer on use of "LANL Roadrunner" name. A supercomputer you can run Windows apps on: The Akka system, installed at the High Performance Computing Center North in Sweden, is a new HPC cluster that comprises a total of 672 nodes, each loaded with two low-power Intel Xeon quad-core L5420 CPUs/16GB of RAM (total of 5376 cores and 10.7TB RAM). 
With a a theoretical peak performance of 53.8tflops, Akka is ranked 39 on the June 2008 Top 500 list. What's important about this supercomputer, though, is that a small part of the cluster will use IBM Cell/B.E. and Power chips (mostly for the development of parallel algorithms). Unlike supercomputer designs that connect clusters of processors PC-style, this system requires less electricity to run and cool thanks to its compact configuration based on IBM's BladeCenter technology -- the system can perform about 266 million calculations per second per Watt (266mflops/W) based on sustained performance. This blog-based column looks at some of the more interesting problems and challenges posed recently in the Cell Broadband Engine Architecture forum. Originally, paulsimon wanted to know about expected behavior when starting more pthreads than SPEs:? Now there's more. [lowellns]: Is this context switching supported by the libspe2? Or the spufs interface? Perhaps a scenario will help. If I have 6 SPEs and 8 pthreads that do this: <pre class="jive-pre"><code class="jive-code jive-java">spe_context_create()spe_program_load()spe_context_run()</code></pre> what will happen? Won't two threads wait until two spes return or are you telling me that there will be some context switching? And possibly related: How would setting pthread schedule policies affect spe threads once they are running? [SDK Service Administrator]: I don't believe this is supported on the PS3 since it's kernel dependent, so in your case the 2 extra threads will wait for two of the others to exit. Editor: You might also want to see "Changes in libspe: How libspe2 affects Cell Broadband Engine programming" (developerWorks, July 2007). iamrohitbanga wants to know if there are resources for getting the SDK going on Debian: I need help to install Cell SDK 2.0 on Ubuntu Hardy Heron. The reason is Fedora would not support my wifi card. i tried to convert rpm packages using alien, but not succeeded so far. 
[davidhi] (from 2006): I wanted to get the CBE SDK up and running on my laptop, so I thought I'd share my experiences getting the CBE simulator running on Debian. I went ahead and made debs out of all the packages and did away with install.sh (the postinst scripts do the appropriate work). I ran into two basic problems, one having to do with a Makefile and one having to do with tcl/tk compile-time options. At first the command PATH=$SCE_INST_DIR/ppu/bin:$PATH make -C src was breaking under Debian. After a bit of digging I found out that the Makefile doesn't work properly under make 3.81beta4 (which Debian uses), but it still works under 3.80 (which FC4 uses). I just ended up grabbing a debian package of 3.80-9 and using that for the time being. The breakage has something to do with the $$(@F) sysV make compatibility feature. PATH=$SCE_INST_DIR/ppu/bin:$PATH make -C src $$(@F) After I got everything built/installed, the simulator would fail like so: Segmentation fault on address 0x2c6cc528Restored previous handler.../run_gui: line 30: 6483 Segmentation fault $TOP/../systemsim -cell $* -g After more digging around (noticing that the ldd output for systemsim-cell had libpthread on Debian but not on FC4), I found that this was due to Debian's tk8.4 and tcl8.4 being built with --enable-threads. I don't know why this causes the simulator to fail, but I built new tk and tcl packages without this flag and now everything works peachy. This will be an important thing for IBM to fix, however, because the maintainer of the FC tcl and tk packages has said they are now being built with --enable-threads as of last week. systemsim-cell libpthread --enable-threads [davidhi] (later): Well, just one day after I finished making Debian packages of all the tools, they released new versions of almost everything. I went ahead and built new packages and tried them all out and I didn't encounter any new problems. 
The Makefile structure in cell-sdk-lib-samples-1.0.1 still fails under make 3.81beta4 and the simulator 1.0.1 still crashes when tcl8.4 and tk8.4 are built with --enable-threads. So again, everything works great as long as I use the slightly older make 3.80 to build the stuff in cell-sdk-lib-samples and rebuild my own tcl and tk packages without --enable-threads. For fun, I also tried to see if I could bootstrap the ppc64 version of Debian in the simulator with cdebootstrap/debootstrap as my system image (I mean, why not remove FC4 all the way). It works fine, although it takes a long time to boot because by default it'll try to add swap, fsck, start up an MTA, cron, atd, inetd, syslogd, klogd, etc. So I disabled most of that. Of course, you also need to change inittab so it doesn't try to spawn getty on the first ttys. Also, I needed to make /bin/sh a statically linked shell (I used zsh) rather than the default ppc64 bash. Otherwise, it'd crash when trying to interpret the scripts called by init. The error: malloc: ../bash/variables.c:1854: assertion botched malloc: block on free list clobbered [davidhi] (later): Some people have asked me how to rebuild the tk and tcl packages without the --enable-threads flag and I thought I'd post some directions in case other people find it useful. First, you can use my packages if you'd like. They're i386 arch and based off of the testing version of tcl and tk. If you are using a different architecture (like x86_64) or flavor (stable) or you'd just like to build them yourself, here's a quick tutorial. You'll need a couple of tools (fakeroot and debhelper): apt-get install fakeroot debhelper. Run apt-get source tk8.4 and apt-get source tcl8.4 in some directory, let's call it "pkg-tmp." apt-get install fakeroot debhelper apt-get source tk8.4 apt-get source tcl8.4 For me, this unpacks some source in the tk8.4-8.4.12 subdirectory and in the tcl8.4-8.4.12 subdirectory. 
Their names might be slightly different depending on whether you're running stable, testing or unstable. In the tcl8.4-8.4.12/debian directory, edit the "rules" file and remove --enable-threads from the ./configure line. Then, from the tcl8.4-8.4.12 directory run the command fakeroot debian/rules binary and it will build new debs in the top-level "pkg-tmp" directory. ./configure fakeroot debian/rules binary tcl8.4-doc_8.4.12-1_all.debtcl8.4_8.4.12-1_i386.debtcl8.4-dev_8.4.12-1_i386.deb It rebuilds the documentation too, but you only need to reinstall the main and -dev libraries. Use dpkg -i tcl8.4_8.4.12-1_i386.deb tcl8.4-dev_8.4.12-1_i386.deb. Then do the same process for tk8.4-8.4.12 (edit debian/rules, run fakeroot debian/rules binary and install). dpkg -i tcl8.4_8.4.12-1_i386.deb tcl8.4-dev_8.4.12-1_i386.deb There is some order you must do these in (one package will build with threads if the other is installed with threads no matter what configure says): Based on my experience, I'm almost certain tcl needs to be built and installed with no threads before tk. Run ldd /usr/lib/libtcl8.4.soldd /usr/lib/libtk8.4.so and make sure that there is no line like libpthread.so.0 => /lib/tls/libpthread.so.0 in the output (that would imply that they were built with threads). libpthread.so.0 => /lib/tls/libpthread.so.0 SPU_DISPLAY_EXEC & SPU_DISPLAY_ISSUE kabe wants to know why he/she's having a problem with DMA transfer in libspe2: Hey! I have a problem with the DMA transfer. First it worked all fine. I used really simple code and the libspe.h. This worked. Then i noticed that opening an SPE-Thread is quite expensive. So now I wanted to open a thread and keep it. I found some bits about using pthread with the SPEs and the Program I'm trying to convert to work on the Cell already uses pthreads on other architectures to use multiple cores. So I thought that would be a good idea. 
But now I have switched to libspe2 and the execution of the code just stops in the SPE-code when I use mfc_get. In one instance without changing the code in that area it actually stopped after the mfc_write_tag_mask, so it looks a bit like undefined behaviour. mfc_get mfc_write_tag_mask PPE-Code I'm using: void *evaluateOnSpu(void *data) {int retval;unsigned int entry_point = SPE_DEFAULT_ENTRY; // Required for continuing execution, // SPE_DEFAULT_ENTRY is the standard starting offset. spe_context_ptr_t my_context;// Create the SPE Context my_context = spe_context_create(SPE_EVENTS_ENABLE|SPE_MAP_PS, NULL);// Load the embedded code into this context spe_program_load(my_context, &spuevaluate_handle);//evaluateInfo* info = AB::BSplineSurface::mInfo; // Run the SPE program until completion do {retval = spe_context_run(my_context, &entry_point, 0, &info, NULL, NULL);} while (retval > 0); // Run until exit or error pthread_exit(NULL);}void AB::BSplineSurface::testEvaluateThread(){std::cout << "test" << std::endl;info = mInfo;pthread_t my_thread;int retval;// Create Thread retval = pthread_create(&my_thread, // Thread object NULL, // Thread attributes evaluateOnSpu, // Thread function NULL // Thread argument );// Check for thread creation errors if(retval) {fprintf(stderr, "Error creating thread! Exit code is: %d\n", retval);exit(1);}// Wait for Thread Completion retval = pthread_join(my_thread, NULL);//* Check for thread joining errors if(retval) {fprintf(stderr, "Error joining thread! 
Exit code is: %d\n", retval);exit(1);}} SPE-Code: #ifndef EVALUATEINFO#define EVALUATEINFOtypedef struct {AB::VecPack2 param;Bool4 mask;AB::VecPack3 result[3];Float4 cDBItemList83;unsigned int cDBOffset[4];unsigned int count[2];unsigned int degree[2];Float4 controlPointList31;} evaluateInfo;#endif/***\brief This function is supposed to realize the evaluation on an SPU*/int main(unsigned long long speid __attribute__ ((unused)),unsigned long long argp,unsigned long long envp __attribute__ ((unused))) {evaluateInfo pd __attribute__((aligned(sizeof(evaluateInfo)))); int tag_id = 0;//READ DATA IN//Initiate copy//program_data_ea >>= 32;mfc_get(&pd, argp, sizeof(pd), tag_id, 0, 0);// ************* Normally it just stops here ******************//Wait for completionmfc_write_tag_mask(1<<tag_id);mfc_read_tag_status_any();http://... I confess I don't really understand everything I'm doing here. Most of the code is from examples found around the net. Did I probably only progress half-heartedly to libspe2? Did I forget something? I found a thread with a similar problem here, but it was still with libspe1, I think. And it wasn't solved. P.S. I found out that it works perfectly if I transfer a simple char of the length 128. Is there a problem with transferring structs? Am I doing it wrong? Can I use something else? On a sidenote: I've read that spe_context_run should be pretty slow, too. Is there a way to keep the SPU busy while transferring new data? Is that what mailboxes are for? Do I have to use completely different code then anyway? [jmt_dh1]: Well I'll pick up on one thing in the small extracts of code you posted: evaluateInfo pd __attribute__((aligned(sizeof(evaluateInfo)))); You should be aligning it to 16 bytes (or larger). Aligning it to a multiple of the structure size is not conceptually what you should be doing to follow the DMA alignment rules (though you may of course get lucky if the size is a multiple of 16).
On a sidenote: I've read that spe_context_run should be pretty slow, too. Is there a way to keep the SPU busy while transfering new data? Is that what mailboxes are for? Do I have to use completely different code then anyway? That's a lot of questions, but have a read about double buffering... [NotZed]: As jmt said, alignment is important. And for the dma size as well. It must be <16 or a multiple of 16. I always seem to hit that problem as I only dabble with cell coding rarely and forget it the next time I get back to it. My own side note: Whenever I tried something like the loop: // Run the SPE program until completion do {retval = spe_context_run(my_context, &entry_point, 0, &info, NULL, NULL);} while (retval > 0); // Run until exit or error It would always seem to "lose the executable" at some point after a second or so and crash -- It seems you need to re-load the programme every time. I'm wondering if this is expected or is it a bug or just a coincidence? [jmt_dh1]: You need to re-initialize the entry point variable every time. So many things to remember :) [kabe]: Thanks for your comments so far. Yes, I constructed the struct so it is aligned to a multiple of 16. Otherwise it won't even compile. I'm not that far that I could run my SPU-programme for a longer time, so I don't know if I have to reload the code. I tried another route now and it kinda works... I'm using the mailbox to tell the SPU where the data it needs starts, then I load for example an int, then I load a float from the starting address + sizeof(int) etc. Doesn't look that elegant. Another point is, that I have to load the data in a special order. There is a bigger array in my data set of 128 Float4. When I first load two other variables and then the big array, it crashes. When I first load the big array and then the other data, it works. At another point when I load an int[4] and two int[2] it crashes and when I load one int[8] and divide it by hand into three different arrays it works. 
This tells me that there has to be something seriously wrong. Can't you do many mfc_get consecutively? Even when I use mfc_write_tag_mask and mfc_read_tag_status_all after every mfc_get it hangs. What can be the reason for this? mfc_read_tag_status_all [jmt_dh1]: Sounds extremely odd. Can you attach some reproducible sample code? [kabe]: Sadly I am not allowed to post the actual code; I even had to sign a form. But I will try to reproduce the problem with standard data types... Meanwhile I noticed that the ppu-gdb wasn't installed and that it could give info about the DMA. So I installed it, checked it... and didn't understand anything. This is the output: (gdb) info spu dmaTag-Group Status 0x80000000Tag-Group Mask 0x80000000 ('all' query pending)Stall-and-Notify 0x00000000Atomic Cmd Status 0x00000000Opcode Tag TId RId EA LSA Size LstAddr LstSize E get 30 0 0 0x00000000ffa5de68 0x3ed50 0x00010 * get 31 0 0 0x0000000010c2a0c0 0x3f040 0x00000 get 31 0 0 0x0000000010c29ea0 0x3fbb0 0x00000 get 30 0 0 0x00000000ffa5df08 0x3ed70 0x00020 * get 0 0 0 0xd0000000002a6900 0x00e80 0x00000 putl 0 0 0 0xd0000000002f5000 0x00000 0x00000 0x00bf0 0x00008 In another post I read that the * is a sign for trouble. Can anyone help me understand this? [jeshua]: I don't know what a lot of this means, but it looks to me like the two marked with a * both have bad alignment on the EA (Effective Address). The LSAs (Local Store Address) looks aligned properly. [kabe]: That comment pointed me into the right direction, thank you! I really only forgot to align some variables in the PPE-Code. Aligning the variables in the struct leads to new problems, but I think I'll find the answer to that. b.lix wants to know what's the difference between the LOOSE and TURBO modes on the simulator: I'm working with the Cell SDK on two different computers. When I switch the Cell System Simulator into "Fast Mode" I get different messages from the simulator on the different machines. 
On one I get Simulator now in TURBO mode and on the other I get Simulator now in LOOSE mode. Simulator now in TURBO mode Simulator now in LOOSE mode What is the difference between these modes? And how do I get the second system also into TURBO mode? [mkistler]: TURBO mode is only supported on 64-bit systems. It is a special flavor of FAST mode that uses just-in-time translation of PPC instructions into instructions of the host machine. Since Cell is a 64-bit architecture, it was not practical to support TURBO mode on 32-bit host systems. [b.lix]: I think there is also a FAST mode on 32-Bit Systems. I want to know why I get Simulator now in LOOSE mode and not Simulator now in FAST mode. What is this LOOSE mode? Simulator now in FAST mode [mkistler]: LOOSE mode is a minor tweak on FAST mode that performs "blocks" of instructions per CPU before switching to simulating instructions on the other CPU. This is far more efficient but can result in minor timing differences in interrupt delivery and/or synchronization mechanisms. Hot topics roundup FastMalloc(busTransPool): too many chunks Forum statistics for May 2008 Threads: 116 | Participants: 5,016 | Replies: 480 | % threads answered: 22% From DAC: Is multithreading really the best way to exploit multicore systems effectively?: A concerning question popped up at the recent 45th Design Automation Conference: "Is multithreading really the best way to exploit multicore systems effectively?" This reflected the efforts EDA vendors have been putting into adding multithreading capabilities to their tools to help with multicore design; problem is, at the 45nm node, more designs climb over the 100 million-gate mark and break current IC CAD tools. Parallel processing has traditionally relied on threads but threads sort of start bottoming out at four processors.
Read the detailed report to see what some of the best thinkers in the industry think about this question, including Gary Smith of Gary Smith EDA -- he thinks threads are dead: "It is a short-term solution to a long-term problem. Library- or model-based concurrency is the best midterm approach." You have until September 2011: The N-Prize ("Nanosatellite"/"Negligible Resources") is a competition to stimulate innovation around inexpensive access to space. To compete, you must launch a satellite weighing between 9.99 and 19.99 grams into Earth orbit and track it for a minimum of nine orbits. It must not cost more than US$2000 and must be done before 19:19:09 (GMT) on September 19, 2011. The prize is about US$19,000. Entanglement on film: Quantum entangled images, in this case two random pictures physically separated but linked through their complementary features, have been captured in real time by researchers at the Joint Quantum Institute. They did it by using linked laser beams originating from a single point that produces twin images (a cat face, one inverted and the other backwards) at separate locations. For more on qentanglement, see "Storing nothing and doing it right!," "Photon encoding breaks record," "'It's your mother calling yesterday'," "Qentanglement goes where no man ...," "Honey, get out the Qentanglement photo album," and "Tangled up in that quantum net." Saucy algorithm exploits symmetries to crack combinatorial problems: The torture level of the scourge of design automation math -- combinatorial problems like "what is the shortest route to send an Internet message around the world?" -- has been reduced by a new "saucy" algorithm. The Saucy algorithm's developers claim it can solve combinatorial problems by finding symmetries among large swaths of possibilities. (Symmetries are mathematically equivalent branches of a search, interchangeable options that lead to the same outcome so they only need to be calculated once.
If you ID all the symmetries in a set before you start comparing outcomes, you can eliminate lots of "duplicates.") They claim that in a test of the previously mentioned Internet message problem, it found an optimum path in under a second. Deskside lets you work with Cell/B.E. code too: Terra Soft's quad-core 970 PowerStation (a four-way SMP system based on the PowerPC 970MP Processor and the CPC945 North Bridge Chip) is a deskside workstation/server that also may be used to prepare and optimize code for Cell/B.E. systems (in fact, Yellow Dog Linux includes the IBM SDK for Multicore Acceleration which is installed by default). You can even use the PowerStation to develop code for and manage clusters built on PS3s or the high performance IBM BladeCenter QS22 systems. TotalView Debugger gets Cell/B.E. support: Blue Gene/P support too. TotalView Technologies TotalView 8.5 source code debugger now lets users debug Cell Broadband Engine architecture applications (as well as delivers enhanced IBM Blue Gene/P support). It supports Linux systems using the IBM Cell/B.E. SDK (SDK 2.1 on FC6 and SDK 3.0 on Fedora 7/RHEL 5.1). When a pre-built BLAS application binary (executable) is run with the BLAS library, the library internally manages SPE resources available on the system using the default SPE management routines. This is also true for the other BLAS applications that do not intend to manage the SPEs and want to use default SPE management provided by the BLAS library. 
Example application scopy sdot #include <blas.h>#define BUF_SIZE 32/********************** MAIN ROUTINE **********************/int main(){ int i,j ; int entries_x, entries_y ; float sa=0.1; float *sx, *sy ; int incx=1, incy=2; int n = BUF_SIZE; double result; entries_x = n * incx ; entries_y = n * incy ; sx = (float *) _malloc_align( entries_x * sizeof( float ), 7 ) ; sy = (float *) _malloc_align( entries_y * sizeof( float ), 7 ) ; for( i = 0 ; i < entries_x ; i++ ) sx[i] = (float) (i) ; j = entries_y - 1 ; for( i = 0 ; i < entries_y ; i++,j-- ) sy[i] = (float) (j) ; scopy_( &n, sx, &incx, sy, &incy ) ; result = sdot_( &n, sx, &incx, sy, &incy ) ; return 0;} Control with environmental variables BLAS_NUMSPES BLAS_USE_HUGEPAGE BLAS_HUGE_PAGE_SIZE BLAS_HUGE_FILE BLAS_NUMA_NODE BLAS_SWAP_SIZE BLAS_SWAP_NUMA_NODE BLAS_SWAP_HUGE_FILE For more on environmental variables, see "Programming with BLAS: Tuning the library for performance." Taken from the Basic Linear Algebra Subprograms Programmer's Guide and API Reference. Download the SDK 3.0. Check out some reference guides in the Cell Resource Center SDK library. LANL Roadrunner earns its name: Seems LANL's Roadrunner is now poised to take its place as the fastest supercomputer in the world -- think a stack of 100K laptops about one-and-a-half miles tall. In the Roadrunner, two IBM QS22 blade servers and one IBM LS21 blade server are combined into a specialized tri-blade configuration (which can run 400gflops) for a total of 3,456 tri-blades. Standard processing like file system I/O is taken care of by the Opteron processors while math-/CPU-intensive tasks go to the Cell/B.E. processors. (There are more interesting facts in the Roadrunner fact sheet.) Even the New York Times is getting in on the story: "If all six billion people on earth used hand calculators and performed calculations 24 hours a day and seven days a week, it would take them 46 years to do what the Roadrunner can in one day." 
Other coverage includes: You got the hardware; what about the software?: This EE Times article discusses efforts to enable all sorts of software to take advantage of the speed of multicore systems. Buddy Bland, project director for a major supercomputer center at Oak Ridge National Lab (which hopes to install its own pflops system this year), noted that "getting applications to scale is our biggest challenge" and goes on to add that "it turns out you get just as much advancement from better software and algorithms as you do from better hardware." Oak Ridge has been testing such parallel programming languages as IBM's X10, Cray's Chapel, and Sun's Fortress. Bill Thigpen, chief of supercomputing engineering at the NASA Ames Research Center, has observed an increasing gap between the rate at which benchmark performance is rising and the increases in the ability to do actual work: "One of the challenges is being able to get the available work out of the theoretical performance peak." He goes on to note that scaling is a challenge: "Communications becomes a bigger part of your work. If you spend increasing time passing information between the processors, the processors are not doing as much work on the real issue." The article goes on to illuminate why the important thing researchers learn from the LANL Roadrunner may not have to do with speed but with how the heterogeneous processors interact. UPDATE 06/12/08: Panel on LANL's Roadrunner at ISC08: In a special panel session at the International Supercomputing Conference (June 17-20, Dresden; session on June 18) entitled "RoadRunner: The First Petaflop/s System in the World and its Impact on Supercomputing," two leaders of the drive to build Roadrunner -- Dr. Andrew White from Los Alamos and Dr. Don Grice of IBM -- will be joined by HPC experts to discuss the impact the system will have on the world of computing. Included are Other conference highlights include
https://www.ibm.com/developerworks/community/blogs/powerarchitecture/date/200806?lang=en
CC-MAIN-2014-15
refinedweb
5,167
62.38
Feedback Getting Started Discussions Site operation discussions Recent Posts (new topic) Departments Courses Research Papers Design Docs Quotations Genealogical Diagrams Archives Ruby and python have been mentioned many times on LtU, but I would like the opinion of gurus here. Which language is more interesting for those who have deeper knowledge of programming language theory? I'm not so concerned with speed of respective VMs, the community around these languages, even their syntax, etc. I'm interested in the languages (and their APIs I suppose). For example, for practical programming, are ruby's continuations significantly better than python's co-routines (2.5)? How do 'lambda' functions in each language compare? Is one language closer to 'functional' programming than another? Is one language better than another for building logic programming or constraint logic programming constructs? Is one language better than another for building the kind of functionality found in concurrent languages (erlang, Oz)? Python and Ruby are very similar, but I think Python is behind Ruby in terms of supporting FP. I'm mainly a Python user, so there may be an aspect of Ruby-envy in what I have to say. Python doesn't support full continuations or even coroutines; instead it supports "generator" functions which create a kind of limited coroutine. A generator function can yield control to its caller with the "yield" keyword, but yielding is like returning in that only the generator itself can yield--another function cannot yield on the generator's behalf. Generators are wonderful for simplifying the implementation of iterators, since a generator is indistinguishable from a function that returns an iterator, but they tend to be awkward for other sorts of tasks.
There is a "stackless" version of Python with continuation support (), but there are no plans to ever integrate this feature into the official version of Python, partly because of implementation difficulties and partly because Guido van Rossum thinks continuations are too confusing to use. Overall I'd say the Python community and GvR in particular are slightly hostile to functional programming, and the language reflects this in various subtle ways. For instance, there is a very sharp distinction in Python between expressions and statements, and lambda expressions in Python are restricted to a single expression. This isn't a problem per se, because any expression involving lambda can be trivially rewritten to use an ordinary Python function definition, but the overall effect is to strongly discourage the use of higher-order functions. This is in sharp contrast to Ruby, where HOFs are extremely common and lambda expressions have such a lightweight syntax they're practically invisible. Python doesn't support full continuations or even coroutines; instead it supports "generator" functions which create a kind of limited coroutine. This has changed for version 2.5, see the PEP. Of course, continuations are still more general. Also, the new "send" method is unlikely to please PL aficionados; it really seems like a hack. I expect that the average python programmer won't use it very much. It will probably be used by library authors when they really need coroutines. cheers, Jess ...and lambda expressions in Python are restricted to a single expression... I don't believe this is quite right. I believe python can have plural expressions in lambda, however it can have no expressions that allow for blocks of code (for, while, etc). The restriction is not an arbitrary one. It has to do with the lexical structure of python itself. Due to the block-delimited scope, and the lack of any explicit scope tags, Python cannot represent unambiguous lambda expressions.
When Guido first went to implement lambda, he ran up against this basic constraint, and was probably ill-advised to attempt lambda at all. From having tried it myself, in a custom python parser of my own, I believe there is simply no way whatsoever of doing it without introducing an explicit scope token. If we had to enumerate the "language features" offered by Python and Ruby, Ruby would probably be the most advanced. A very useful (FP) feature that Python lacks is the declaration of anonymous blocks -- Python only offers lambdas which can only contain expressions, not statements. The lack of anonymous blocks is quite painful when dealing with asynchronous libraries such as Twisted, when you often have to give callbacks. Anyway, from a "language design" perspective, I find that Python is more attractive than Ruby. Its syntax is rather consistent, and makes the code understandable and readable, even by newbies. Ruby code is sometimes quite cryptic to read, which is partly a consequence of its Perl roots. So to me, some features put aside (such as anonymous blocks), the difference between both languages is mainly a question of style/taste. Python is more like "there should be only one way to do it", while Ruby is more like "there should be more than one way to do it". While simple, these design strategies can make big differences in the end. it must be said that in python you can have nested functions, so even if they are not inlined as arguments you may still get some of the advantages. But there is a small issue which I think is related to parsing: assigning to an external variable will create a new local one instead of updating the name/value binding, so you can access them like this: >>> def f(n): ... def g(i): ... return n+i ... return g ... >>> zero=f(0) >>> zero(1) 1 but you could not write the usual "accumulator": >>> def f(n): ... def g(i): ... n+=i ... return n ... return g ...
>>> zero=f(0) >>> zero(1) Traceback (most recent call last): File "", line 1, in ? File "", line 3, in g UnboundLocalError: local variable 'n' referenced before assignment In ruby you can't really have nested functions but if you use a lambda in a method it will work as expected. You can write an accumulator, it just involves a little stickiness due to the way that python syntax works: >>> def f(n): ... nn = [n] ... def g(i): ... nn[0] += i ... return nn[0] ... return g ... >>> zero = f(0) >>> zero(1) 1 >>> zero(1) 2 Because of the weird necessity to package up the variable in a container people often assume python can't do these things. It can, it just does them strangely. This is not really the same thing, imo, in the sense that while the classical closure example changes the name/value binding this example just accesses a mutable object. Well, possibly using frame objects that could be done anyway, but that would be cheating :) Oh, and I even know the trick to do that with a lambda ;) They're fixing this in python 3.0 (and possibly 2.6): Python 3.0a2 (r30a2:59382, Jan 4 2008, 13:31:57) [GCC 4.0.1 (Apple Computer, Inc. build 5367)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> def f(n): ... def g(i): ... nonlocal n ... n += i ... return n ... return g ... >>> zero = f(0) >>> zero(1) 1 >>> zero(1) 2
I use Python every day, but I think for real concurrency it is a dead end (you can tell by the fact that the most popular Python module to achieve real parallelism is multi-processing which maps a threading API unto OS processes (sic!)). Unless, of course, something dramatically happens (the Unladen Swallow guys are pushing in that direction). As of today, you can use concurrency constructs to better organize your code, and maybe exploit a little bit of WaitIO parallelism, but that's about it; the general approach in Python is flawed (which is another case of the problem of concurrency as an afterthought, which Joe Armstrong once pointed out). If you want to learn more about the limitations of Python's threads and the GIL, check out this awesome presentation by David Beazley (video, slides). If you want to explore concurrent programming, Erlang and Oz are far more suited. I cannot comment on the Ruby side. Ruby is Objects all the way down, python isn't. Which Python object is not an object? Seems like Rubys still don't know what they are talking about when they consider other languages. Does make a bad impression on me. But enough about language communities for the moment. ...an impression as made by Pythonistas that don't read their own docs.... I apologize for blaming Rubys in this affair. I guess this sentence hasn't been updated since Python 2.2 ( for 5 yrs? ) which ironically already completed unification ( i.e. "new-style classes" ). Swollen programmer egos with great future plans are almost always ludicrous. When they start to pin down their ToDo lists in beginners tutorials right at the moment where they implement the features to come, it's going to be unhealthy. Types were still first class objects, they just weren't classes suitable for inheritance. Ruby people like to insist that it is better because the language is 'purely' object-oriented, like java. 
Don't get me wrong, I don't have a problem with Ruby, but why does 'multi-paradigm' have to be a dirty word? C:\Python21>python Python 2.1.3 (#35, Apr 8 2002, 17:47:50) [MSC 32 bit (Intel)] on win32 Type "copyright", "credits" or "license" for more information. >>> def foo(*a): ... for b in a: ... c = b(1) ... print c ... >>> type(str) <type 'builtin_function_or_method'> >>> type(int) <type 'builtin_function_or_method'> >>> type(float) <type 'builtin_function_or_method'> >>> foo(str,int,float) 1 1 1.0 >>> In Java int / char / ... are NOT objects and have to boxed and unboxed into objects to be treated as such. (Especially before you can use them in the container classes.) Tell me, my Python has got a bit rusty, and this whole unification thing has progress majorly since I last looked at Python closely.. Can you do this? (I find this so beautiful...) class Integer def factorial return 1 if self <= 1 self * (self-1).factorial end end puts 10.factorial or nice and consistent, but less useful than the above example, perhaps inherit from a Builtin... class Beads < Integer end These were things you couldn't do in earlier versions of Python. It may have changed since then. As of python 2.4, there still is the difference between builtin types and user-defined. You can't add attributes, nor modify in any way the builtin behaviour of builtin types, unless you explicitely inherit from them, like in your second example. But then, some would argue this is saner -- since someone can't turn an int.add into int.multiply in the middle of a program -- and allows for better optimizations... I believe this Ruby feature allows for some sweet DSL-handling, but i guess that's not something python folks are interested into, since "there should be one way to do it" and DSLs clearly allow for a lot more... I like python and i like ruby. But i'm still waiting for Perl 6. :) You address two issues. One is subclassing, another is open classes. 
int is not an open class so you can't add methods to it but create a custom subclass: class Integer(int): def factorial(self): if self <= 1: return 1 return self*Integer(self-1).factorial() >>> Integer(8).factorial() 40320 The Integer class is indeed open but it would be a redefinition ( rebinding ) of the name Integer if you tried to add another method with a new class statement later. >>> def iseven(self): return not bool(self%2) >>> Integer.iseven = iseven # define new Integer attribute 'iseven' If you don't want to refuse Ruby style open class definitions as shown in your Integer example you have to define a custom metaclass ( e.g. a metaclass "partial" which is inspired by the language functionality in C# ) that registers your definitions and fit them together. The syntax would be like this: class Integer(int): __metaclass__ = partial def factorial(self): return 1 if self <= 1 else self*Integer(self-1).factorial() class Integer(int): __metaclass__ = partial def iseven(self): return not bool(self%2) That is roughly the current state. Maybe you can help me with something. I've never seen a good use case for open classes. Take your example. In Python I would write: def factorial(n): if n <= 1: return 1 return n * factorial(n-1) ...which is shorter, and more general, and won't clobber anyone else's implementation of factorial. You can subclass "int" in Python, but I've yet to see a good use case of that either. ...a sense of right place for it. Suppose you found the Integer class had a '+' method, but not a '-'. (I'm not saying Ruby's base classes are so deficient, I'm merely "what iffing") You could put in a non-class method as you did for factorial. But it would irritate, it would grate, it would hurt (mine at least) the aesthetic senses.
You would strongly desire to put the inverse method with the forward method _on the class_. At least this way, you can... * Keep your code clean. * Create an elegant proof of concept. * Having done so, create an RCR (Ruby Change Request) where you submit your working implementation. * And when the change is accepted, odds on it will be very close to what you designed. * Thus the next version of ruby will be improved, and your code will pretty much "work out the box" with it. I choose Integer class purely for tutorial reasons. Everyone knows what it is what it does. Where this is far more likely to be useful is on the far more sophisticated standard library classes available in Ruby. eg. Networking / XML / ... classes.. As a software developer (more engineer than artist) I'm more interested in code that works and will continue to work than in code that looks pretty. When I program in Ruby I avoid adding methods to existing classes like the plague. (In Scala, they've thought things through a bit more so that "open classes" are lexically scoped) Monkey patching is a bit different: Sometimes that's the only way to fix a bug and get your code working without physically patching someone else's code (which has its own maintainability headaches). Both Python and Ruby allow monkey patching in pretty much all situations where it is necessary. You can't change the way that integer add works (without subclassing) but then you aren't likely to find a bug in Python's implementation of integer add, so why would you want to replace it globally.. What if GvR implements your factorial function (that you pulled into the global scope from your math lib with a from math_utils import *), but changes the parameter order? Ah-ha, you shouldn't be polluting the namespace like that--you should be importing it like import math_utils as mu. But guess what? 
It's considered best practice in ruby to put the class re-opening code in a separate module and then explicitly require that module--you always know when you get the modified version of the class. You could go a step further (if you've changed a method, which is unusual, normally you're just extending the class) and add a class variable which flags that the class has been modified. If that then opens up the possibility of bugs because somebody has changed the behavior of a method of the base class, inheriting from the base class and changing the behavior of an inherited method (or just adding new methods) opens up the possibility of bugs because you forgot to wrap a built-in type in your inherited class or you called a method that returns a built-in type that you forgot to wrap. E.g: class Int(int): def print_add(self, n): y = self + n print "%d + %d = %d" % (self, n, y) return y x = 5 x.print_add(5) # oops, x is an int Int(x).print_add(5).print_add(5) # oops print_add returns an int Neither the python nor ruby way is idiot-proof, but neither will bite you very often if you follow best practices and know what the functions and methods you call are doing. One of the best things about having open classes is that a lot of static methods disappear. A lot of the static methods I write in Java or C# (before extensions), for example, are utilities that operate on a parameter of built-in type. To implement the same function in an open system I would just add that method to (what used to be) the parameter's class. For example, let's say you want to repeat a string. In Java I might declare a method Repeat in a new StringUtils class. Then to call it I have to say StringUtils.repeat(someString, 4). Repeat StringUtils StringUtils.repeat(someString, 4) In a language like Ruby or Smalltalk you would just add the repeat(n) method to the String class. In Ruby at least you can put those class extensions in a module and mix it in at your leisure. 
The other benefit is the ability to remove/nullify dangerous methods in an instance of the runtime and let users run code in a sandbox. P.S. Ruby already has String.* I think the lesson to take from the awkwardness of class extensions in closed systems is not that open classes are good, but that functions and namespaces are more natural, since the invocation of calls bundled with the abstraction, and invocation of calls in an extension look the same. Or you could take C#'s approach and create so-called "extension methods", but from what I understand extension methods have a somewhat complicated resolution strategy. a nutjob Please refrain from this sort of language on LtU. Hi Doug, thanks for your friendly words. See you on RuPy in April next year ;) Cheers I've been programming C++ multithreaded applications for many years and I have never had any problems with it. Of course I never attempted, for example, to synchronize access to bitfields. I think that the "gotcha's" mentioned in literature are abnormally overemphasized. There are well-established techniques to code multithreaded applications using hardware threads. I am not saying it is necessary (i.e. software threads may be Ok in the majority of cases), but it is not that difficult as everybody says. ...if you have a good architecture and/or don't care about the odd sporadic unreproducible bug. Both approaches work and are used extensively. Failure to be anal about the Good Architecture guarantees the presence of races. Too many companies have bad threading architecture and then "test it into the shape of a product". Still has thousands of defects, just not reproducible on the test bench. Things like failure to use "volatile" correctly, failure to obey the strict aliasing rules, etc also make these bad threading architectures fragile and sensitive to changes in optimization parameters and/or versions of the compiler. These problems are not 'first class' problems of threading.
All the problems you mention are problems of the C/C++ programming language that leaves many things unspecified, rather than the problem of hardware threads. ADA has hardware threads but none of these problems, from what I have seen so far. While processors have a difference between "register" and "external memory" volatile and the exact meaning of that is "first class". While processors are pipelined and a good optimizing compiler _has_ to reorder instructions, the exact meaning of a variable at each point in the code will be precise from a single threaded point of view, but fuzzy from a multithreaded point of view. Partly we have been shielded from these issues by... * 99% of our code only runs on single CPU. In future more and more cpus will be dual core or hyperthreaded. * Compilers are starting to push the boundary on pointer optimizations. More and more optimizations assume "strict aliasing rules" apply. Is your code strict aliasing safe? How many of your colleagues even know what that means? * Inlining and partial evaluation becomes more aggressive with each generation of compiler. * Pipelines become longer with each generation of CPU, and compilers have to reorder deeper to gain the advertised performance benefit. But multithreaded programming is not affected by the issues you mention. No optimization can mess up the simple 'lock->operate->unlock' protocol needed for most, if not all, of cases. Exactly what I'm talking about. Excellent Reference! Thanks. Of course, the compiler needs to know if it should put memory barriers around some instructions. What I meant was that C, as it is implemented right now with specific optimizations for threads support, is just as good for threaded applications as ruby or python is and that you do not need a VM to do threading properly; threading can be hardware-driven. Optimizations that current compilers offer can not mess up threading, even if it is hardware based.
Your last statement doesn't hold, there are existing compilers that include optimisations that will mess threading up. And while the techniques for shared state concurrency are known, they're still error-prone enough in practice that I've frequently seen the advice "don't unless there's no other way, and there probably is". In terms of Joe Moron up the corridor's ability to make your code behave badly, it's far worse than pointer arithmetic. Achilleas wrote: There are well-established techniques to code multithreaded applications using hardware threads. [...] it is not that difficult as everybody says. Could you list what you had in mind, or at least point to a website? I've never run across such a definitive list of "well-established techniques". I know there's simple advice, like reducing the scope of code that is threaded and using objects that are immutable, but the hard part is enforcing these techniques. The compiler doesn't tell you when you made a mistake; instead you get nasty bugs. Philippa wrote: In terms of Joe Moron up the corridor's ability to make your code behave badly, it's far worse than pointer arithmetic. I'd just like to add that it doesn't take a "moron" to screw up threading. I figure we're all Joe Moron for an hour or two sooner or later. 1) use RAII. 2) lock operations, not data. 3) don't lock immutable objects. 4) share resources between threads, only if needed; otherwise, use synchronized queues. It is not very difficult to enforce the above. The most important is #1, which makes synchronization very simple and effective, and the synchronization primitive will be unlocked in any case. I fail to see how you're going to enforce this in most languages in a case where someone's just plain forgotten to include the locks. And if someone does a 'memset(0, 0, 0xFFFFFFFF)' the memory of the process will be wiped out. 
The argument here is that a lowlevel programming language allows efficient problem-free multithreading based on hardware, and there is no need to simulate threads using software. And not everyone agrees with you about the problem-free part. There's a reason we're not all writing code using macro assemblers any more. ...is patently ludicrous, and as I've pointed out before, one of the reasons you tend not to get responses from people is your obstinate refusal to learn a single solitary thing from anything that's said to you. You continue to engage in proof by repeated assertion, show no interest in actually learning anything about programming language issues, and simply browbeat everyone who's gullible enough to attempt to engage you. Regretfully, at this point, I'm afraid that I have to suggest that you be the second person to be banned from LtU for trolling. Achilleas, a problem with this discussion is that we don't have sufficient information to evaluate what you're saying. Your sketch of an approach to concurrency raises more questions than it answers — Jeff and Neil have raised some of them, but we don't have detailed answers. LtU depends heavily on links to papers or articles. Your argument would be much easier to evaluate if we had a link to a writeup of the approach you're thinking of. From what you've said so far, it sounds as though you're willing to accept certain tradeoffs in exchange for a simple programming model. As an example of a tradeoff, it sounds as though you're willing to accept reduced concurrency on multi-CPU machines, or perhaps you aren't working with multi-CPU machines. That's fine in some situations, but not every programming problem is similar to the ones you work on. For example, the threading issues in a typical GUI application don't compare to the kinds of issues that come up in a heavily-loaded multithreaded server application. 
In the former, unnecessarily blocking due to overzealous locking is virtually a non-issue; in the latter, it can mean the difference between success and failure of an application. When discussing programming languages, we can't just restrict ourselves to one kind of application, or one approach to writing code — we have to acknowledge that there's a bigger picture, even if it may not be of interest to us personally. One huge benefit of PL and CS theory is that it allows us to understand issues that we might never have personally encountered. Along those lines, I would highly recommend the treatment of concurrent programming in CTM, if you haven't already read it. That ought to help in understanding where some of the other respondents in this thread are coming from. Also, please bear in mind that many, perhaps most LtU readers have an interest in ways to improve the state of the art in programming languages; indeed, that's one of the purposes of LtU. Arguing for the status quo in languages like C and C++ is not usually very interesting. It might be, if there really were a case for saying that there's absolutely no benefit to competing approaches. However, both the practical evidence and the theory are against that being the case. I agree with Anton. Achilleas, it seems like you are either uninterested in programming languages (as languages), and thus uninterested in the expressiveness they have to offer, or that you don't appreciate what they can do to help regarding concurrency. Both these issues were discussed here (with plenty of references) many times, including in a paper posted yesterday to the homepage. I suggest you look at these discussions, study the references, and comment on specific things you find objectionable.
Note that from the perspective of LtU the expressiveness of the language constructs is the main thing, implementing them as efficiently as possible comes second: efficient implementation should be possible, but might still not exist in the marketplace. Thus, it is more fruitful to discuss the advantages (or lack thereof) of the language constructs, and move the discussions about the speed of hardware threads elsewhere (note that seeing these as mutually exclusive is certainly not the only option). How do you distinguish code that is meant for concurrency from non-concurrent code? Do you mean from a compiler's perspective? because from the programmer's perspective concurrency is in the design. In case you are talking about the compiler, there is no compiler that can automatically identify which resources should be locked and which should not be, and there isn't going to be one in the near future. Why are operations locked and not data? Because locking an operation prevents from making difficult-to-find problems. For example, if I have a synchronized double-linked list, I have to lock the operations 'insert' and 'remove' and not the 'first ptr', 'next ptr' members etc. What operations should be locked? The operations accessed from more than one thread. How do you know what objects are immutable? Immutable objects are 'const' (for those languages that have 'const', that is :-) ). How do you prevent deadlocks? By locking multiple resources with the multi-lock function the host provides. How likely are you to find bugs in testing? As likely as in single-threaded apps. How easy is it to change the code and not violate previous assumptions? As easy as your design makes it to be. If you use separate locks for operations that alter the same data structure, then you have not got a correct solution.
Concurrent access to shared mutable state requires synchronisation not just between multiple threads performing the same operation, but also between multiple threads performing different operations on the same data structure. If 'insert' and 'delete' have separate locks, then how do you ensure that a thread attempting an insert won't interfere with another thread attempting a delete at the same time? If, on the other hand, you mean that you encapsulate all lock handling within some set of operations rather than allowing anybody access to the data structure, then this of course seems like a sane thing to do. However, it's not that simple as concurrent operations are usually not composeable: e.g. if you want to do something like: if (!container.contains(key)) { container.insert(key, value); } then you run into problems as 'contains' will release its lock before 'insert' acquires its lock, leading to interleaving. So you need to either provide external access to the locks: container.lock(); if (!container.contains(key)) { container.insert(key, value); } container.release(); Or provide some form of transactions or other way of performing compound updates. This stuff is really hard to get right. I'm currently marking undergraduate concurrency coursework submissions (in Java, which is at least easier and more consistent than C/pthreads), as I have done for the past three years. In that time, I've only seen a handful of absolutely correct solutions. This leads me to believe that the current mainstream approach to shared-state concurrency is just too difficult. If you use separate locks for operations that alter the same data structure, then you have not got a correct solution.
EDIT: The example you mention is a perfect one of operation composition and it really proves my point. The composition of operations 'contains' and 'insert' are combined to form another operation, and it is this new operation that is locked. no matter what the ASCII looks like, no matter if you use POJava, or Erlang Actors, or your combined operations, or even STM, the point is that some human has to identify what the critical sections are, and also how to merge them. seriously, if the developer knew the answer to that in every case, then the rest is just syntax. now, i'm not saying that syntax doesn't matter, and i'm not saying that there isn't a spectrum of betterness, but i don't believe (from what i hope i understand, apologies if i'm still just confused!) that the locking operations you talk about really are going to be the magic pixie dust i really want. :-) I agree with you that is what needs to be done.... But I have on my desk 200000 lines of real world C and a nifty Ruby script that follows call graphs from thread starts to static resources that will show you clearly how uncommon that sense is in practice.. You also forgot to mention things like "volatile" and thread contexts (interrupt service routine vs deferred service routine / timer context vs types of locks like interrupt masks vs disable all interrupts vs scheduler lock vs mutex vs condition variable vs semaphore vs bit semaphore vs counting semaphore vs .... sigh. Believe me, I have ample proof sitting on my desk right now that native threading and the sense of the common man don't go very well together. But I have on my desk 200000 lines of real world C First of all, you have 200,000 lines of C code. I have 3 C++ apps, one is 120,000 lines of code, the other 45,000 lines of code, and the last one 10,000 of code, all heavily multithreaded, but I do not have a single problem of handling them all (and I am the solo maintainer!). I suspect that the reason is that RAII saves my day.
You also forgot to mention things like "volatile" and thread contexts (interrupt service routine vs deferred service routine / timer context vs types of locks like interrupt masks vs disable all interrupts vs scheduler lock You can not prove that native threads are evil compared to non-native threads using operating system-level examples. Interrupt service routines, deferred servicing, interrupt masks are all things that belong in the domain of kernels/drivers and a non-native thread implementation has nothing to do with them. vs mutex vs condition variable vs semaphore vs bit semaphore vs counting semaphore vs .... sigh. All these things are handled transparently using RAII. The apps I mentioned above contain mutexes, semaphores and events, and I never had any problem handling them with RAII. It may be boring to repeat it, but RAII is very important. (and I am the solo maintainer!). Why are you implying that being a solo developer is harder? It is much, *much* easier. i think it is also that the ASCII syntax is not over-bearing when you have stack-allocated things because the destructors are called w/out having to have noisy ugly evil bad java style try/catch/finally. i.e. even IDisposable is an ugly hack to my mind. maybe D's scope-exit is the most reasonable way out from t/c/f? Philippa Cowderoy: In terms of Joe Moron up the corridor's ability to make your code behave badly, it's far worse than pointer arithmetic. I often work with Joe's cousin, Moe Optimistic, who doesn't think the problem will come up, but with pretty much the same results as code written by Joe. Moe gets it, kinda, but thinks the issue of correct multi thread protection is academic, and not a real world problem. 
Then I have to prove my crashes were caused by them updating non-locked shared state, which makes me look unproductive (because maybe those crashes are just my mistake) up until I show everyone's recent unstable system experiences are directly attributable to Moe's refusal to heed my forewarning. I hate watching glibc abort in malloc() when memory is corrupt due to memory management under completely unprotected races, resulting in updates to deleted memory and freed but reallocated memory. It's too late to pin blame on the code which made the error. Many folks believe its fine to write code wrong at first, and slowly run the problems to ground later, through incremental refinement and defect removal. But, making your code correct under threading is not something easy to do when you reach the fit and finish stage of development, with looming deadlines. The well-known techniques for writing correct multi threaded code only have nice results when everyone actually uses these techniques with some diligence. :-) Joe and Moe make life hard for the rest of us. I can't figure out how to put the fear of god into Moe; but maybe the lesson will be learned. Before I reach the head-shaking stage, I usually ask Moe the following series of questions, after I see he has some refcounted objects shared widely in the code, which are casually modified by methods without benefit of any kind of mutual exclusion. I already know we have lots of threads. Q. Are these objects used by more than one thread? A. I don't think so. Q. We have lots of threads -- are these objects used by this thread here and that thread there? A. Yes, they are. Q. Why don't you use a lock for this state you change? A. That state won't change when more than one thread is involved. Q. How do you know that? A. It won't be a problem. Q. Have you thought about making the objects readonly once they become shared? A. That's too complex. Q. 
No, it would be easy to have these objects copy-on-write once you tell the object to 'freeze' when you're done building them and you actually start to share. Want to do that? A. Too much architecture. I just know I won't update those objects when more than one thread is involved. Q. And you don't want me to add something to check, just so we'll know there's a problem if you're wrong? A. No, speed is very important. Q. Objects used by multiple threads should either never change, or have some kind of mutex. This machine you're running on has multiple CPUs, and this operation happens very many times a second. It's going to fail if you read and write concurrently without a lock. How do you know this update here won't affect an instance that's been handed to another thread? It's refcounted and you're casual about passing around references. Are you sure? A. Yes, I just know it will be okay. As near as I can tell, folks want to apply the YAGTNI (you are going to need it) principle to multithread safety, which seems like lousy procedure to me. They're afraid you'll lock unnecessarily and lose performance, and would rather risk instability until a need to lock is later proved. If this increases the cost of development for everyone at all times up until the proof is found, then so be it. ...believes we live in the best of all possible worlds. The pessimist fears that he is right. I'm depressed now. I had some vague optimistic hope that Joe & Moe were purely local and perhaps there was another place where I wouldn't have to endlessly explain and re-explain these issues. You have destroyed that thin ray of hope. you are all hired, trust me! ;-) Unfortunately, what's needed is the ability to say "convince us it works - your word isn't good enough". Which tends not to work so well on a social or political level, it upsets people and leaves room for stupid games ("I don't like you so I'll make you do twice as much work as everyone else proving every last detail!"). . 
I guess this probably has something to do with why I'm a lot more disciplined about side-effects these days. Of course, I have the luxury that I'm pretty much always working on single-woman projects - despite which Joe still manages to sneak some code in once in a while. Thankfully at least Moe's being kept out. I frequenty use mutexes and critical regions in my apps. The code is very simple: Lock<CriticalSection> lock(cr1); ... or Lock<Mutex> lock(mutex1); ... I do not see how there is a problem with that, and what optimizations can break the synchronization. I have never had a nasty problem with C++ from 1998 that I have been doing this. If a compiler breaks the above due to optimizations, then it is the compiler that should be blamed and not hardware threads. And certainly doing software threading (ala ruby or python) does not buy you anything. Any sufficiently aggressive non-lock-aware dead code elimination could decide that a lock isn't actually doing useful work. Any rescheduling that isn't lock-aware could move things out of the resulting critical section. If we accept that locks can be implemented by libraries, there needs to be a language-level mechanism for indicating that no, the sequencing really does matter and so do the side-effects even if the end result is the same state you started in. Post-'98 C++ is not somewhere you'll encounter this. But someone writing a compiler will sooner or later trip over it or else consciously avoid the problem from the outset. Not everybody's working from your viewpoint. My argument here is that you do not need to simulate threading with a VM and a software model, because hardware threading works just as fine. If the compiler needs to be aware, so be it. It goes without saying that a sufficient lowlevel programming language like C/C++ (which hits the bare metal) must provide ways to the programmer to say "here do not do this optimization" or "here do this optimization in this specific way". That's what #pragmas are for. 
If these changes are hidden from from the programmer inside a library, then the programmer need not be aware of them. Still, no problem for me. The issue is simply that, if the language specification itself doesn't take hardware threading into account (the C and C++ specifications presently don't), any facilities you may attempt to use to ensure correct hardware threading aren't likely to be portable, are unlikely to have carefully-defined semantics, and may therefore be incomplete. Also, as a practical matter, if you're reduced to using #pragmas to disable specific optimizations, it's very difficult to be exhaustive in explicitly disabling all the unsafe optimizations (especially since they and their safety may change across compiler versions). You also can't necessarily control the #pragmas with which third-party libraries were compiled, and sometimes that can matter. I don't disagree about the usefulness of RAII for defining critical sections, and the importance of having good libraries for this stuff, but in the case of C and C++, clarification of the language specifications is required for such libraries to be robust. Broadly speaking, you want to do the right things, but in the absence of well-defined hardware threading in the language specification I think your unquestioning faith in your compiler vendor is misplaced. How do you find the concurrency semantics (and indeed, the sequentiality semantics, if any) of ToonTalk (as compared to say of Ruby)? Ignore "sensors"; consider only a subset of ToonTalk where birds and trucks are the only communication. I ask in this context because it seems that in the comparison of C++ to Ruby concurrency, issues of safety are on the table, and message semantics seems to me very safe; I can't think how even Moe could mess it up. Message-passing is desirable in that simple things are safe by default, but for more complex things it's certainly possible to create all the same bugs (deadlocks, races, etc.) 
which you can with locks and shared state (they are duals). Ruby is painful in that it doesn't have well-defined concurrency semantics, although I am one of the people working on changing that. Presently one must rely on the semantics of the platform underlying the Ruby implementation (Java for JRuby, for example). These statements are largely content-free: Rubies yield is just so cute. Rubies blocks make decoupling layers so easy and obvious, it's hard to spot that you have done it. Articles on double dispatch in C++/Java magazines make me giggle. It's all just so much harder than it needs to be. Ruby is the counter example. Mixin's are really really nice. Duck typing is really really a Good Idea. What is it about Ruby that makes people talk this way? (Though I note you missed out “simple and intuitive”) I'm not saying you couldn't formulate a decent argument using the intent behind these statements, but as they are a decent argument they do not make. Ruby's culture tends to be very "touchy-feely" compared to most languages. I actually like that for the most part, but in cases like this it can admittedly be counterproductive. I'm not sure that most of the statements here are quite correct either: I would not dare mention double/multiple dispatch as a selling point for Ruby (it sucks in that respect). These statements are largely content-free: .... What is it about Ruby that makes people talk this way? (Though I note you missed out “simple and intuitive”) It's not the fault of Ruby-the-language. I've seen that kind of "marketing speak" associated with several languages as they became popular - in fact, to go with Ehud's recent comment, there's probably a reverse correlation between a language's popularity and the substantive content in the claims of its adherents. So one might instead ask what it is about "the herd" that reduces them to saying the equivalent of "my language rocks, your language sucks."
For the most part the designers of these popular languages have seemed to be solid, smart people with enough understanding of alternatives to be aware of some of the trade-offs they've made in their design. But as the masses descend and the community grows from 1 to 10,000, the thoughtfulness disappears in the noise. Maybe it's just the standard bell shaped distribution curve in action. Certainly it should be enough to give any language designer pause in his/her desire to create The Next Big Language. This is only a syntax taste issue, so isn't so important in a sense as the semantic discussions in this thread, but I want to react about Ruby's yield, which is part of the overly complex way that Ruby treats parameters and arguments in its syntax. I don't like it. In Lisp and Smalltalk, you can pass some kind of block or lambda expression or procedure or function (not getting into the subtle differences among these things semantically) as an ordinary argument to an ordinary parameter and the callee can decide whether to invoke the passed-in block zero times or however many times and/or to communicate it on to someone else who then has the same range of decisions. This is also true in Ruby. But Ruby adds this special syntax for passing one block, outside the ordinary list of arguments. Accordingly there are special syntaxes for the parameter (yield, and the & notation) corresponding to this special argument. So every time I write a method that is going to accept a block as an argument, I have to decide whether it should be passed as an ordinary argument or as the special block argument. And every time I write a call on a method someone else wrote that accepts a block as an argument, I have to know which way to pass it. 
And if I am going to design a method that wants two block arguments, at least one of them has to be an ordinary argument because there can be at most one special argument, and I have to decide whether one of them should be the special and if so, which one. For example, I could implement Smalltalk's ifTrue:ifFalse: (Self's ifTrue:false:) in Ruby; I suppose the false branch should go in the special argument position. I don't know where Python comes down in this regard. Is this really such a problem? If your programming style lends towards passing lambdas as arguments to methods, your method definitions should reflect that. If on the other hand you want your block definitions to be coupled with the function that yields them, you should define your methods using the '&' notation. In [link], I respond to this. Python's designers very strongly discourage such Smalltalk-esque "flow control macros"; see the "motivation" section of PEP 343. Interesting. The PEP (Python Enhancement Proposal) refers to Raymond Chen's rant about flow-control macros. Not getting into responding to that too much, I find it nonetheless hard to avoid noting that COMMENT the snippet of the Bourne shell that Chen presents makes it obvious that just as the Bourne shell implements Algol 68, it is also written in Algol 68 TNEMMOC. Getting back to the discussion about lambdas, how in Python do you write an expression for the result of taking some collection and applying a function to each element to produce a new collection ("map" in Lisp)? In Ruby, this is a_collection.map { | each_element | each_element + 1 } for example if the function to be applied is the successor function on integers. a_collection.map { | each_element | each_element + 1 } map(funct, a_list) returns a new list that is a_list with funct applied to each element [funct(x) for x in a_list] does the same thing Or do you mean other collections than lists? Or do you mean other collections than lists? This shall work for all iterables i.e. 
for all classes that implement __iter__ and next() appropriately. . . . in Python. I have read (in articles or blogs I can no longer find) that Ruby does DSLs very well. Any comments on that? Btw, Ruby seems to be winning so far, no python defenders? Not to defend Python or anything, but you should take a look at the python department (especially in the "classic" archive). We pointed to many projects that use Python for cool language experimentations. Well you can consider RubyOnRails as a DSL and it seems to have many positive reports. For the ruby vs python part, I remember looking at both languages and thinking that they are very similar, I'd say that Python is more adequate for beginners and ruby for more advanced people (especially if you know shell|perl). If you want to write DSL's, Tcl might be another one to look at, as you can really do a lot in that direction, to the point of defining your own control structures. To answer the original question, I agree with the other poster that compared to the more 'academic' languages, Python and Ruby are very similar. Lately I like Ruby a bit more, but I'll come clean and admit that there's some 'bandwagon effects' there. I've written several DSLs in Ruby, all of them embedded. My experience has been that for the user, they work fairly well, but for the implementor they're not such a happy experience. Ruby, unfortunately, has a lot of syntactic and semantic oddities that tend to make doing more sophisticated things difficult. This was a major thing influencing me to switch most of my work to Haskell. I'm not so familiar with Python, but my impression is that there's not really much to pick from between the two languages: each has minor things it does better than the other, and both suffer heavily from having grown out of a very casual design process.
I was recently examining Python for an internal DSL and because Python lacks overloading the assignment operator makes some things difficult; consider working with Python's Decimal class. If the user is working with a Decimal object then assigns a float, the decimal object is lost: d = decimal("1000.001") d = d + .001 # this works because + is overloaded d = .001 # d is now not decimal and this behavior cannot change That's Python semantics obviously, but if I have a Decimal-like object wrapping arithmetic on numbers I'd like to allow users to work only with my objects when that's clearly what they're doing. I went through this last year and ended up using DrScheme. Will you be here all week? Every month or three I throw up my hands and say, Ruby or (...language of the day...) is all way too complicated, I'm going to use Scheme. Scheme is so simple, it's so clean. So I get stuck in, do stuff for a day or two and then throw up my hands and say who cares about syntax whether it's clean or not, it's the semantics that really matters. Scheme's semantics are not simple and clean enough for me. So off I go and use Joy for a day or two. Then I throw up my hands and say it's expressiveness and power that matter, so I go back to Ruby. After piling ruby code on ruby code for a day or twenty, I start to say, I wish I could trivially parse this stuff so I could visualise, analyse, instrument, measure, refactor it, .... At which stage I started working on my own language that has the simplicity of syntax of Scheme, the algebra of Joy, and the expressive semantics of Ruby, but a cleaner more analysable semantics than any of them. So after a day of hard thinking on that I find it is actually a very very hard problem I have set myself and start using Ruby again... This sounds way way way too familiar. "Why We Fight" When programmers fight about programming languages it's the fight they're interested in, not the programming languages. How do you find Haskell?
It seems to me that the semantics're clearer than Scheme's and probably comparable to Joy's, and the syntax is a nice middle ground between Scheme and Ruby. yeah, wasn't it for monads and we'd have a winner. :P Am I weird for liking the things monads offer? Parsec springs to mind pretty quickly, but the many variants on the State monad (Reader and Writer being more obvious examples) also come to mind as something that offers more than I could have just by having mutable storage in the language. For parsing, modularity and stuff... Now if I could only have mutation as well in the language, you'd have a winner... Or to put it another way, ST and IO aren't good enough? Once in a while I give some thought to a language where everything's in some kind of monad-like structure and so there's no cost to suddenly needing another monad-supplied feature... ...but all of the recent work on language-level approaches to concurrency, partial/subcontinuations, Tim Sweeney's comments on "The Next Mainstream Programming Language," etc. have certainly gone a long way toward convincing me that a language that did indeed syntactically "look like" it was doing mutation, but under the covers was ST-like, and could have the syntactic sugar "stripped away" to expose more expressive power, would be a wonderful thing. Essentially you'd have an onion-peel language design, where the surface syntax was firmly in the Algol/Pascal/C tradition, but inner syntax, and the underlying semantics, would be rather more strongly ML/Haskell/Epigram/Ontic-like. The more you got stuff like Oleg's Zipper "for free," the better. But now I'm just reiterating what Tim has already said explicitly here several times. You get used to it with some practice, but I agree it's less than perfect. That said, I'm not entirely sure what you mean by "they're not easy to put together"? 
If you mean that writing new monads from scratch isn't easy, I'm inclined to agree in Haskell98 - it's a lot easier with GADTs, you can treat the monad as a language, build an AST type and write an interpreter (and you can get the more conventional style back by fusing the AST constructors and the interpreter, eliminating the AST type in the process and given you H98 code again) Is there a nice example of this technique (using GADTs+monads) somewhere that I can look at? It sounds interesting, in line with how I sometimes try to use monads anyways, and a good way to learn how to use GADTs. Also, as a general question -- how good is the Haskell compiler (or rather, GHC) at spotting examples of code written to "interpret" computations in some monad, and emitting code which simply performs this computation fairly directly? (Or is this not a sensible question?) Perhaps this is not really on-topic, in which case apologies... I talked about it at Anglohaskell last year - here're the slides: . Never did do a nice version of the parsing example, sadly. I know GHC can in theory pick it up with inlining and case/ctor, but I don't know how aggressively it does so and recursion can get in the way some. The slides do show you how to translate fairly mechanically into a 'shallow' style where there's no interpretation happening though. Mutation is basically the same thing as merging streams of messages indeterministically, right? Let's say there's a process consuming a stream of messages. When it processes a message, it determines the state for processing the next message to arrive on the stream, etc. And let's suppose that whoever composes a message can send some logical variable that can be told by the state at the receiving end, and asked by the sender of the message or her delegate. 
If the stream being processed by the process resulted from merging two or more streams from different sources, the sources can cause state changes and can observe changes caused by the other source, so you have in effect shared mutable state, right? It took me quite some time to start building them comfortably (though probably less time than it took me to learn to use OO well), but I find now that monads provide significant Good Things in my life. Monadic parsers are a classic example, but there are plenty of other things where a custom monad can significantly simplify the expression of a problem to the point where it's pretty much a DSL. I use one for some fairly complex message generation within a trading system I'm writing, and assemblers for various machine languages are another classic example. Speaking as someone who switched from Ruby to Haskell, I find it an absolute joy. Going to back to Ruby now feels to me almost as bad as going back to Java. If you think Scheme's semantics aren't simple enough, I see two broad possibilities: one is that the semantics of the lambda calculus aren't simple enough for you, and the other is that Scheme's semantic extensions to lambda calculus (mainly some data types and mutable variables) aren't simple enough for you. If it's the former, then you're in trouble, since anything simpler than the lambda calculus tends to be either quite restrictive, or low-level (think SK combinators), or both. Of course, there are some common LC features that many languages avoid, like higher-order functions — you can do that, and pay the price in terms of expressivity/power. But other than that, LC is about as basic and simple as it gets — you can't get much simpler without paying a hefty price. OTOH, if you think Scheme's extensions to the lambda calculus complicate the semantics too much, that may help figure out what you're really looking for. 
For example, mutable variables are a big, practical extension with notoriously messy consequences, notably their effect on the algebraic properties of code. If you want to avoid that, then you need to look at a more purely functional language (although nothing stops you from using a purely functional subset of Scheme, btw). But to live without mutable variables in practical code, you're likely to need to look towards something like monads, linear naming, or linear typing. By comparison to these options, IMO, in practical contexts mutable variables aren't so bad, especially when used with discipline in mostly functional code. You mentioned the algebra of Joy: don't forget that LC-based languages have well-understood algebra-friendly intermediate representations such as CPS and ANF. In general, I think that you should first figure out what semantics you're interested in rather than how those semantics happen to be manifested or combined in particular languages. If it turns out that what you want is some combination of existing semantics, you might be able to make that happen in a language like Scheme. But if you need an entirely new semantics, then as you say, you've set yourself a hard problem. ...many a time. Basically it seems to me mutable does horrible things to the algebra. The very very good thing about Lambda Calculus from the Algebra point of view is the lazy evaluation of arguments. Lazy Eval + Partial Eval + Constant Folding + inlining == Good Very Things One of the design goals of Joy apparently was to get rid of argument names as being irrelevant complications to the algebra. Part of my problems in life is my day job is shepherding symbols in big commercial industrial size chunks of code. Thus I'm very uncomfortable with anything I feel won't scale to that size. One of the things that makes me very uncomfortable about Joy is it is "halting problem difficult" to decide how deeply and what the stack effect of an arbitrary chunk of Joy code will be.
A huge benefit (from my perspective) of Lambdalike languages is the stack effect is (relative to Joy) very very simple. Furthermore the argument name binding shields client code from implementation changes. I also feel at the large code base scale OOP is very important. I'll admit I haven't managed to grok how immutable & lazy eval fits with OOP. Immutability might be achieved using immutable prototypes. I'm not sure about lazyness either but I guess it is orthogonal to the compositional mechanisms provided by OO. JavaScript would be a good target language to start with. Maybe some cooperation between JavaScript and Haskell developers could be interesting. Web 3.0 is not very far away ;) Yes! Especially the way letrec has assignments in it, and can accumulate some arbitrary amount of state that isn't in lexical variables and therefore has to be carried around in continuations... blaaaaagh. letrec So if you like pure, lazy, lambdalike languages, why isn't Haskell, or perhaps Clean, on the shortlist of languages you try to switch to every few months? Is one language closer to 'functional' programming than another? Is one language better than another for building logic programming or constraint logic programming constructs? Is one language better than another for building the kind of functionality found in concurrent languages (erlang, Oz)? My experience is that Ruby and Python are very similar. Ruby pleases the heavy OOP crowd, whereas Python is looser in that regard. I wouldn't consider either language to be mind-expanding in the way that Erlang, J, or Joy is. I think Ruby wins here. You can build a constraint "framework" with continuations, and Ruby relies on functions that use blocks (lambdas) more than python (so it is more functional). 
A Ruby Quiz about constraint processing: The problem of constraint programming is three fold: writing "amb" in ruby is some kind of rite of passage (I did it too, and I'm stupid :) but I don't think it qualifies ruby as a better environment for logic/constraint programming, especially if you use Kernel#callcc which is very slow. OTOH you could write a CSP library or bindings for an existing one (i.e. jcl via jruby or gecode) but that won't be better than in python. Sadly both languages miss something like Smalltalk's #become, which forces developers to explicitly request the value of a computation and thus the integration of this libraries with the rest of the environment would always be less than perfect. evil.rb has an implementation of #become if you really want it. AFAIK evil.rb's implementation is not perfect, for example a generic Object can't become a Fixnum, cause they have different representations at the interpreter level. not necessarily the langauge itself. On the other hand, the entirety of evil.rb is an artifact of the implementation so I guess you win some, you lose some. something no-one has mentioned, but which really bites you on the ass if you're using functional programming idioms in python is that scoping doesn't behave like you'd expect - i can't remember the details, but this has thrown me several times. also, functional-like constructs are slowly being removed from the core language. i like python and use it as my "mid-level" language of choice (having left perl years ago; although for "low-level" scripts i use bash), but if i were starting from scratch today i would look probably go with ruby (or the dr scheme stuff). What functional-like constructs are being removed? The state of discussion here is somewhat anachronistic ( from 2006 ) and doesn't reflect the actual Python 3.0 design decisions. The only noticable change in Python 3.0 a2 is that reduce has been moved from the builtins to the functools module i.e. a tiny refactoring. 
It was all just a storm in the teacup. reduce Both are solid, practical, interpreted languages that have evolved over about the same amount of time guided by the preferences of their original designer and a strong community of users. Both are strongly, dynamically, structurally typed OO languages with a bit of influence from FP. Both are slightly dynamic languages but not as much as LISP or Smalltalk (by which I mean that the language itself can be slightly programmed in itself). Both are well considered combinations of features taken from older innovative languages. It's interesting to compare the two. They are so similar but both designs have striking differences in philosophy. Python: "there's only one way to do anything." Ruby: the language is more flexible and expressive. I've used both quite extensively. I used to prefer Ruby but now find I use Python because it makes ongoing maintenance easier. It is common in Ruby (at least, when using the Rails framework for web programming) to utter some macro call in a class definition. I know that in theory Smalltalk could do anything if you monkey with the class browsers, etc., but in practice, Smalltalkers don't tend to use macros of this sort the way the Ruby practitioners do. One of the uses for such a macro, for example, is to establish accessor methods for state. Smalltalkers in developing a class may invoke some code to generate access methods to save keyboarding, but once generated, the methods are ordinary methods maintained in the usual way. If a Ruby macro generates methods, code maintainers see the macro call, not the generated code. Macros provided by the Rails framework let you economically express the relationships among entities, validation of input data, and much more. Smalltalkers ... the methods are ordinary methods maintained in the usual way Yes. economically express the relationships among entities, validation of input data, and much more And that's where Smalltalkers would do similar stuff. 
OK, I have started a topic on Ruby vs. Smalltalk. Would you go over there and tell me about the "similar stuff"? How are modern Smalltalkers writing macro calls similar to those Rubyists are writing? Where in ones Smalltalk code base do the calls go, in some class methods, for example? Is there an initialize method on the class side, where the class can talk to itself, and say hey, my instances are related to those of that other class, or validiate these fields, or whatever? initialize I know this is really an old topic, but none of the comments seem to mention PyPy: PyPy is a Python implementation of Python, and other dynamic languages. It serves as an excellent research platform in PL, and several advanced concepts have been added to Python as a demonstration of it. For examples, there are implementations of AOP, Logic programming, tainted values and information tracking, dynamic syntax (you can change the parser run-time) etc. etc. Please check it out if you want a platform to experiment with features of dynamic languages (not just Python). PyPy was discussed here quite a few times. Do search the archives! I'm a Ruby developer but wanted to get into Python because the implementation seems stronger and I liked the idea of reducing cruft with the indentation technique. However, I tried it out for a while and really didn't seem to get along with it, as I kept needing to look up references to know what I was doing. The one thing that REALLY bugged was that Python seemed a bit Perl-like in its object orientation.. that is, it seemed somewhat like an afterthought. The main example I can still remember is to get the length of, say, a string. You use len(string).. but why? If a string is an object, then why doesn't it have a length (or whatever you want to call it) property I can read.. string.length? It seems like there's a somewhat arbitrary choice in Python as to what's a core method and what's a method on instance objects.. and it seems very inconsistent. 
Was I just seeing things wrong, or is there a good reason for this sort of implementation? I found it too inconsistent to be easy to learn.. one reason why I haven't picked up PHP, way too inconsistent in function naming. Peter, I do think these general questions about Pythons design are better addressed at comp.lang.python if the Python FAQ doesn't suffice to answer them. The popular package for programming Web applications in Ruby is the Rails framework. I hear that on the Python side, there are competing frameworks for web programming. Can someone who has worked both in Ruby on Rails and in Python on <your favorite web framework for Python> compare them in terms of the amount of code that has to be maintained to achieve a given functionality on a web site? Coincidentally, I just came across this. This is starting to get pretty off topic for LtU... I don't especially object to it at this point, but web framework advocacy can quickly get out of hand. And I think there's already plenty of it elsewhere? The article below uses python to demonstrate both functional and imperative programming. They describe python as a "multi-paradigm" language. Yikes! My toe nails start curling.
http://lambda-the-ultimate.org/node/1480
CC-MAIN-2019-51
refinedweb
11,886
61.46
Introducing Julia/Working with dates and times Working with dates and times[edit | edit source] Functions for working with dates and times are provided in the standard package Dates. To use any of the time and date functions, you must do one of the following: using Dates import Dates If you use import Dates functions, you’ll need to prefix every function with an explicit Dates., e.g. Dates.dayofweek(dt), as shown in this chapter. However, if you add the line using Dates to your code, this brings all exported Dates functions into Main, and they can be used without the Dates. prefix. Types[edit | edit source] This diagram shows the relationship between the various types used to store Times, Dates, and DateTimes. Date, Time, and DateTimes[edit | edit source] There are three main datatypes available: - A Dates.Time object represents a precise moment of time in a day. It doesn't say anything about the day of the week, or the year, though. It's accurate to a nanosecond. - A Dates.Date object represents just a date: no time zones, no daylight saving issues, etc... It's accurate to, well, a day. - A Dates(Dates.now()) # a Dates.Time object 16:51:56.374 julia> birthday = Dates.Date(1997,3,15) # a Dates.Date object 1997-03-15 julia> armistice = Dates.DateTime(1918,11,11,11,11,11) # a Dates.DateTime object 1918-11-11T11:11:11 The Dates.today() function returns a Date object for the current date: julia> datetoday = Dates.today() 2014-09-02 The Dates.now() function returns a DateTime object for the current instant in time: julia> datetimenow = Dates.now() 2014-09-02T08:20:07.437 (We used Dates.now() earlier to define rightnow,> vacation = Dates.DateTime("2014-09-02T08:20:07") # defaults to expecting ISO8601 format 2014-09-02T08:20:07 See Date Formatting below for more examples. Date and time queries[edit | edit source] Once you have a date/time or date object, you can extract information from it with the following functions. 
For both date and datetime objects, you can obtain the year, month, day, and so on. The Dates.daysofweekinmonth() (days of week in month) function tells you how many days there are in the month with the same day name as the specified day — there are five Tuesdays in the current month (at the time of writing). The last function, dayofweekofmonth(birthday) (day of week of month), tells us that the 15th of March, 1997, was the third Saturday of the month. You can also find days relative to a date, such as the first day of the week containing that day, using the adjusting functions, described below. Date arithmetic[edit | edit source] You can do arithmetic on date and time objects. For example, here's the date and time six months ago: julia> Dates.now() - Dates.Month(6) 2014-03-02T16:43:08 and similarly for years and months: julia> Dates.now() - Dates.Year(2) - Dates.Month(6) 2012-03-02T16:44:03 and similarly for weeks and hours. Here's the date and time for two weeks and 12 hours from now: julia> Dates.now() + Dates.Week(2) + Dates.Hour(12) 2015-09-18T20:49:16 and there are: julia> daystoxmas = Dates.Date(Dates.year(Dates.now()), 12, 25) - Dates.today() 148 days or 148 (shopping) days till Christmas (at the time this was written).
To retrieve the value as a number, use the function Dates.value(): julia> Dates.value(daystoxmas) 148 This works with different types of date/time objects too: julia> lastchristmas = Dates.now() - Dates.DateTime(2017, 12, 25, 0, 0, 0) 25464746504 milliseconds julia> Dates.value(lastchristmas) 25464746504 Range of dates[edit | edit source] You can make iterable range objects that define a range of dates: julia> d = Dates.Date(1980,1,1):Dates.Month(3):Dates.Date(2019,1,1) 1980-01-01:3 months:2019 2016-04-01 2016-07-01 2018-01-01 2018-10-01 2019-01-01 Similarly, here's a range of times 3 hours apart from now, for a year hence: julia> d = collect(Dates.DateTime(Dates.now()):Dates.Hour(3):Dates.DateTime(Dates 2018, the following code shows how the due date creeps forward every month: julia> foreach(d -> println(Dates.format(d, "d u yyyy")), Dates.Date("2018-01-01"):Dates.Day(30):Dates.Date("2019-01-01")) 1 Jan 2018 31 Jan 2018 2 Mar 2018 1 Apr 2018 1 May 2018 31 May 2018 30 Jun 2018 30 Jul 2018 29 Aug 2018 28 Sep 2018 28 Oct 2018 27 Nov 2018 27 Dec 2018 Date formatting[edit | edit source]> Dates.Date("Fri, 15 Jun 2018", "e, d u y") 2018-06-15 julia> Dates.DateTime("Fri, 15 Jun 2018 11:43:14", "e, d u y H:M:S") 2018-06-15T11:43:14 Other characters are used literally. In the second example, the formatting characters matched up as follows: Fri, 15 Jun 2018 11:43:14 e , d u y H: M: S You can supply a format string to Dates.format to format a date object. In the formatting string, you repeat the characters to control how years and days, for example, are output: julia> timenow = Dates = Dates> Dates.Date.([ # broadcast > Dates(Dates.now(), Dates.RFC1123Format) "Sat, 30 Jul 2016 16:36:09" Date adjustments[edit | edit source] which you could also write using the function chain operator: julia> Dates.now() |> Dates.firstdayofweek |> Dates.dayname "Monday" can find the first Sunday, or Thursday, or whatever, of a month. Monday is 1, Tuesday 2, etc. 
julia> Dates.tofirst(birthday, 1) # the first Monday (1) of that month 1997-03-03 Supply the keyword argument of=Year to get the first matching weekday of the year. julia> Dates.tofirst(birthday, 1, of=Year) # the first Monday (1) of 1997 1997-01-06 Rounding dates and times[edit | edit source] You can use round(), floor(), and ceil(), usually used to round numbers up or down to the nearest preferred values, to adjust dates forward or backwards in time so that they have 'rounder' values. julia> Dates.now() 2016-09-12T17:55:11.378 julia> Dates.format(round(Dates.DateTime(Dates | edit source].Day.Day(1) | edit source] The Dates module also offers a unix2datetime() function, which converts a Unix time value to a date/time object: julia> Dates.unix2datetime(time()) 2014-10-24T09:26:29.305 Moments in time[edit | edit source] DateTimes are stored as milliseconds, in the field instant. Use Dates.value to obtain the value. julia> moment=Dates.now() 2017-02-01T12:45:46.326 julia> Dates.value(moment) 63621636346326 julia> moment.instant Base.Dates.UTInstant{Base.Dates.Millisecond}(63621636346326 milliseconds) If you use the more precise Dates.Time type, you can access nanoseconds. julia> moment = Dates.Time(Dates.now()) 17:38:44.33 julia> Dates.value(moment) 63524330000000 julia> moment.instant 63524330000000 nanoseconds Timing and monitoring[edit | edit source])
https://en.wikibooks.org/wiki/Introducing_Julia/Working_with_dates_and_times
CC-MAIN-2022-33
refinedweb
1,135
66.54
Drag and Drop# An overview of the drag and drop system provided by Qt. Drag and drop provides a simple visual mechanism which users can use to transfer information between and within applications. Drag and drop is similar in function to the clipboard’s cut and paste mechanism. This document describes the basic drag and drop mechanism and outlines the approach used to enable it in custom controls. Drag and drop operations are also supported by many of Qt’s controls, such as the item views and graphics view framework, as well as editing controls for Qt Widgets and Qt Quick. More information about item views and graphics view is available in Using drag and drop with item views and Graphics View Framework . Drag and Drop Classes# These classes deal with drag and drop and the necessary mime type encoding and decoding. Configuration# The QStyleHints object provides some properties that are related to drag and drop operations: - startDragTime()describes the amount of time in milliseconds that the user must hold down a mouse button over an object before a drag will begin. - startDragDistance()indicates how far the user has to move the mouse while holding down a mouse button before the movement will be interpreted as dragging. - startDragVelocity()indicates how fast (in pixels/second) the user has to move the mouse to start a drag. A value of 0means that there is no such limit. These quantities provide sensible default values that are compliant with the underlying windowing system for you to use if you provide drag and drop support in your controls. Drag and Drop in Qt Quick# The rest of the document focuses mainly on how to implement drag and drop in C++. For using drag and drop inside a Qt Quick scene, please read the documentation for the Qt Quick Drag , DragEvent , and DropArea items, as well as the Qt Quick Drag and Drop examples. Dragging# To start a drag, create a QDrag object, and call its exec() function. 
In most applications, it is a good idea to begin a drag and drop operation only after a mouse button has been pressed and the cursor has been moved a certain distance. However, the simplest way to enable dragging from a widget is to reimplement the widget’s mousePressEvent() and start a drag and drop operation: def mousePressEvent(self, event): if (event.button() == Qt.LeftButton and iconLabel.geometry().contains(event.pos())) { drag = QDrag(self) mimeData = QMimeData() mimeData.setText(commentEdit.toPlainText()) drag.setMimeData(mimeData) drag.setPixmap(iconPixmap) Qt.DropAction dropAction = drag.exec() ... Although the user may take some time to complete the dragging operation, as far as the application is concerned the exec() function is a blocking function that returns with one of several values . These indicate how the operation ended, and are described in more detail below. Note that the exec() function does not block the main event loop. For widgets that need to distinguish between mouse clicks and drags, it is useful to reimplement the widget’s mousePressEvent() function to record to start position of the drag: def mousePressEvent(self, event): if (event.button() == Qt.LeftButton) dragStartPosition = event.pos() Later, in mouseMoveEvent() , we can determine whether a drag should begin, and construct a drag object to handle the operation:) ... This particular approach uses the manhattanLength() function to get a rough estimate of the distance between where the mouse click occurred and the current cursor position. This function trades accuracy for speed, and is usually suitable for this purpose. Dropping# To be able to receive media dropped on a widget, call setAcceptDrops(true) for the widget, and reimplement the dragEnterEvent() and dropEvent() event handler functions. 
For example, the following code enables drop events in the constructor of a QWidget subclass, making it possible to usefully implement drop event handlers: def __init__(self, parent): QWidget.__init__(self, parent) ... setAcceptDrops(True) The dragEnterEvent() function is typically used to inform Qt about the types of data that the widget accepts. You must reimplement this function if you want to receive either QDragMoveEvent or QDropEvent in your reimplementations of dragMoveEvent() and dropEvent() . The following code shows how dragEnterEvent() can be reimplemented to tell the drag and drop system that we can only handle plain text: def dragEnterEvent(self, event): if (event.mimeData().hasFormat("text/plain")) event.acceptProposedAction() The dropEvent() is used to unpack dropped data and handle it in way that is suitable for your application. In the following code, the text supplied in the event is passed to a QTextBrowser and a QComboBox is filled with the list of MIME types that are used to describe the data: def dropEvent(self, event): textBrowser.setPlainText(event.mimeData().text()) mimeTypeCombo.clear() mimeTypeCombo.addItems(event.mimeData().formats()) event.acceptProposedAction() In this case, we accept the proposed action without checking what it is. In a real world application, it may be necessary to return from the dropEvent() function without accepting the proposed action or handling the data if the action is not relevant. For example, we may choose to ignore LinkAction actions if we do not support links to external sources in our application. Overriding Proposed Actions# We may also ignore the proposed action, and perform some other action on the data. To do this, we would call the event object’s setDropAction() with the preferred action from DropAction before calling accept() . This ensures that the replacement drop action is used instead of the proposed action. 
For more sophisticated applications, reimplementing dragMoveEvent() and dragLeaveEvent() will let you make certain parts of your widgets sensitive to drop events, and give you more control over drag and drop in your application. Subclassing Complex Widgets# Certain standard Qt widgets provide their own support for drag and drop. When subclassing these widgets, it may be necessary to reimplement dragMoveEvent() in addition to dragEnterEvent() and dropEvent() to prevent the base class from providing default drag and drop handling, and to handle any special cases you are interested in. Drag and Drop Actions# In the simplest case, the target of a drag and drop action receives a copy of the data being dragged, and the source decides whether to delete the original. This is described by the CopyAction action. The target may also choose to handle other actions, specifically the MoveAction and LinkAction actions. If the source calls exec() , and it returns MoveAction, the source is responsible for deleting any original data if it chooses to do so. The QMimeData and QDrag objects created by the source widget should not be deleted - they will be destroyed by Qt. The target is responsible for taking ownership of the data sent in the drag and drop operation; this is usually done by keeping references to the data. If the target understands the LinkAction action, it should store its own reference to the original information; the source does not need to perform any further processing on the data. The most common use of drag and drop actions is when performing a Move within the same widget; see the section on Drop Actions for more information about this feature. The other major use of drag actions is when using a reference type such as text/uri-list, where the dragged data are actually references to files or objects. Adding New Drag and Drop Types# Drag and drop is not limited to text and images. Any type of information can be transferred in a drag and drop operation. 
To drag information between applications, the applications must be able to indicate to each other which data formats they can accept and which they can produce. This is achieved using MIME types . The QDrag object constructed by the source contains a list of MIME types that it uses to represent the data (ordered from most appropriate to least appropriate), and the drop target uses one of these to access the data. For common data types, the convenience functions handle the MIME types used transparently but, for custom data types, it is necessary to state them explicitly. To implement drag and drop actions for a type of information that is not covered by the QDrag convenience functions, the first and most important step is to look for existing formats that are appropriate: The Internet Assigned Numbers Authority ( IANA ) provides a hierarchical list of MIME media types at the Information Sciences Institute ( ISI ). Using standard MIME types maximizes the interoperability of your application with other software now and in the future. To support an additional media type, simply set the data in the QMimeData object with the setData() function, supplying the full MIME type and a QByteArray containing the data in the appropriate format. The following code takes a pixmap from a label and stores it as a Portable Network Graphics (PNG) file in a QMimeData object: output = QByteArray() outputBuffer = QBuffer(output) outputBuffer.open(QIODevice.WriteOnly) imageLabel.pixmap().toImage().save(outputBuffer, "PNG") mimeData.setData("image/png", output) Of course, for this case we could have simply used setImageData() instead to supply image data in a variety of formats: mimeData.setImageData(QVariant(imageLabel.pixmap())) The QByteArray approach is still useful in this case because it provides greater control over the amount of data stored in the QMimeData object. 
Note that custom datatypes used in item views must be declared as meta objects and that stream operators for them must be implemented. Drop Actions# In the clipboard model, the user can cut or copy the source information, then later paste it. Similarly in the drag and drop model, the user can drag a copy of the information or they can drag the information itself to a new place (moving it). The drag and drop model has an additional complication for the programmer: The program doesn’t know whether the user wants to cut or copy the information until the operation is complete. This often makes no difference when dragging information between applications, but within an application it is important to check which drop action was used. We can reimplement the mouseMoveEvent() for a widget, and start a drag and drop operation with a combination of possible drop actions. For example, we may want to ensure that dragging always moves objects in the widget:) ... The action returned by the exec() function may default to a CopyAction if the information is dropped into another application but, if it is dropped in another widget in the same application, we may obtain a different drop action. The proposed drop actions can be filtered in a widget’s dragMoveEvent() function. However, it is possible to accept all proposed actions in the dragEnterEvent() and let the user decide which they want to accept later: def dragEnterEvent(self, event): event.acceptProposedAction() When a drop occurs in the widget, the dropEvent() handler function is called, and we can deal with each possible action in turn. First, we deal with drag and drop operations within the same widget: def dropEvent(self, event): if (event.source() == self and event.possibleActions() Qt.MoveAction) return In this case, we refuse to deal with move operations. 
Each type of drop action that we accept is checked and dealt with accordingly: if (event.proposedAction() == Qt.MoveAction) { event.acceptProposedAction() # Process the data from the event. } else if (event.proposedAction() == Qt.CopyAction) { event.acceptProposedAction() # Process the data from the event. else: # Ignore the drop. return ... Note that we checked for individual drop actions in the above code. As mentioned above in the section on Overriding Proposed Actions , it is sometimes necessary to override the proposed drop action and choose a different one from the selection of possible drop actions. To do this, you need to check for the presence of each action in the value supplied by the event’s possibleActions() , set the drop action with setDropAction() , and call accept() . Drop Rectangles# The widget’s dragMoveEvent() can be used to restrict drops to certain parts of the widget by only accepting the proposed drop actions when the cursor is within those areas. For example, the following code accepts any proposed drop actions when the cursor is over a child widget ( dropFrame): def dragMoveEvent(self, event): if (event.mimeData().hasFormat("text/plain") and event.answerRect().intersects(dropFrame.geometry())) event.acceptProposedAction() The dragMoveEvent() can also be used if you need to give visual feedback during a drag and drop operation, to scroll the window, or whatever is appropriate. The Clipboard# Applications can also communicate with each other by putting data on the clipboard. To access this, you need to obtain a QClipboard object from the QApplication object. The QMimeData class is used to represent data that is transferred to and from the clipboard. To put data on the clipboard, you can use the setText(), setImage(), and setPixmap() convenience functions for common data types. 
These functions are similar to those found in the QMimeData class, except that they also take an additional argument that controls where the data is stored: If Clipboard is specified, the data is placed on the clipboard; if Selection is specified, the data is placed in the mouse selection (on X11 only). By default, data is put on the clipboard. For example, we can copy the contents of a QLineEdit to the clipboard with the following code: QGuiApplication.clipboard().setText(lineEdit.text(), QClipboard.Clipboard) Data with different MIME types can also be put on the clipboard. Construct a QMimeData object and set data with setData() function in the way described in the previous section; this object can then be put on the clipboard with the setMimeData() function. The QClipboard class can notify the application about changes to the data it contains via its dataChanged() signal. For example, we can monitor the clipboard by connecting this signal to a slot in a widget: connect(clipboard, QClipboard.dataChanged, self, ClipWindow::updateClipboard) The slot connected to this signal can read the data on the clipboard using one of the MIME types that can be used to represent it: def updateClipboard(self): mimeTypeCombo.clear() formats = clipboard.mimeData().formats() if (formats.isEmpty()) return for format in formats: data = clipboard.mimeData().data(format) # ... The selectionChanged() signal can be used on X11 to monitor the mouse selection. Examples# Interoperating with Other Applications# On X11, the public XDND protocol is used, while on Windows Qt uses the OLE standard, and Qt for macOS uses the Cocoa Drag Manager.
https://doc-snapshots.qt.io/qtforpython-dev/overviews/dnd.html
CC-MAIN-2022-21
refinedweb
2,369
50.46
#include <ATM_Connector.h> #include <ATM_Connector.h> Collaboration diagram for ACE_ATM_Connector: Default constructor. ACE_ATM_Params() ACE_ATM_QoS() 0 ACE_ATM_Addr("", 0) Actively connect and produce a <new_stream> if things go well. The <remote_sap> is the address that we are trying to connect with. The <params> are the parameters needed for either socket or XTI/ATM connections._ATM_Addr::sap_any> then the user is letting the OS do the binding. If <reuse_addr> == 1 then the <local_addr> is reused, even if it hasn't been cleaned up yet. Actively add a leaf to the root (i.e., point-to-multipoint). The <remote_sap> is the address of the leaf that we are trying to add. Try to complete a non-blocking connection. If connection completion is successful then <new_stream> contains the connected ACE_SOCK_Stream. If <remote_sap> is non-NULL then it will contain the address of the connected peer. Dump the state of an object. Resets any event associations on this handle. Declare the dynamic allocation hooks. [private]
http://www.theaceorb.com/1.4a/doxygen/ace/classACE__ATM__Connector.html
CC-MAIN-2017-51
refinedweb
158
53.07
2008/11/4 andrzej zaborowski <address@hidden>: > 2008/11/2 Dmitry Baryshkov <address@hidden>: > [...] >> As a second thought I've implemented blanking, implemented your >> suggestions. Please take a look at the following patch. > > Thanks, this looks better. Good! > >> >> From 57cd66fefc31ac18f1f896a8ca53441b01f0d345 Mon Sep 17 00:00:00 2001 >> From: Dmitry Baryshkov <address@hidden> >> Date: Thu, 11 Sep 2008 04:39:15 +0400 >> Subject: [PATCH] tc6393xb: non-accelerated FB support >> >> Signed-off-by: Dmitry Baryshkov <address@hidden> >> --- >> hw/devices.h | 4 +- >> hw/tc6393xb.c | 120 >> +++++++++++++++++++++++++++++++++++++++++++++++- >> hw/tc6393xb_template.h | 72 +++++++++++++++++++++++++++++ >> hw/tosa.c | 17 +++++-- >> 4 files changed, 205 insertions(+), 8 deletions(-) >> create mode 100644 hw/tc6393xb_template.h >> + >> +#if BITS == 8 >> +# define SET_PIXEL(addr, color) *(uint8_t*)addr = color; >> +#elif BITS == 15 || BITS == 16 >> +# define SET_PIXEL(addr, color) *(uint16_t*)addr = color; >> +#elif BITS == 24 >> +# define SET_PIXEL(addr, color) \ >> + addr[0] = color; addr[1] = (color) >> 8; addr[2] = (color) >> 16; >> +#elif BITS == 32 >> +# define SET_PIXEL(addr, color) *(uint32_t*)addr = color; >> +#else >> +# error unknown bit depth >> +#endif > > Hmm.. now when I look at this it triggers an alert because > *(uint16_t *) addr = color; > and > addr[0] = color; addr[1] = (color) >> 8; > do different things on a bigendian host. But I can't tell offhand > which one we want here... > > pxa2xx_template.h and pl110_template.h might have the same problem. From a quick glance most other video emulators will suffer from this problem. IMO we can merge this as is and later fix this if the problem really exists on BE hosts. -- With best wishes Dmitry
http://lists.gnu.org/archive/html/qemu-devel/2008-11/msg00141.html
CC-MAIN-2016-36
refinedweb
247
59.19
Intro to Decentralized Databases with GUN.js Databases are an important component of software systems and are often themselves distributed for purposes of high availability. In these scenarios, the data are ultimately under a centralized model. The abstracted database architecture is the authority and results in a mechanism that maintains the current state of the data. Looking from the perspective of a decentralized system, each participant or node would own their data and share with nodes across the network as needed. This style of architecture is referred to as peer-to-peer or p2p and is used in file sharing protocols like BitTorrent. GUN.js is a real-time, decentralized, offline first, graph database. Sure is a mouthful. Knowing a little bit about decentralized architecture we can understand the other features. Under the hood, GUN allows for data synchronization to happen seamlessly between all connected nodes by default. Its offline-first capabilities mean that if connectivity is lost to other nodes due to a network error or no availability, the application will store all changes locally and automatically synchronize as soon as there is a connection. Finally, the flexible data storage model allows for tables with relations (MSSQL or MySQL), tree-structured document orientation (MongoDB), or a graph with circular references (Neo4j). In this article, we are going to build a simple note taking application in React using GUN that will update real time between two different clients. This will help to illustrate how the key features of GUN look in action. I have a simple React boilerplate project that I use for some of my projects in order to avoid recreating an empty project. I really recommend doing this for beginners to become familiar with the libraries used and the build tools. There are plenty of starting projects and command line tools used to generate React projects, but those are recommended after you’ve gained proficient understanding.
Let’s begin by adding the Gun.js library: $yarn add gun --save The project comes with a server.js file that creates an HTTP server using Express.js. It has been configured to run both for development and production environments. Here we will add a GUN datastore that clients will connect to and for our purposes, the data will persist in a JSON file on our server. I’ve created a directory /db and the file data.json where GUN will write to: ├── LICENSE.MD ├── README.md ├── db │ └── data.json ├── images │ └── favicon.ico ├── package.json ├── server.js ├── src │ ├── components │ │ ├── App.js │ │ ├── Auth.js │ │ ├── Home.js │ │ └── NoteForm.js │ ├── index.html │ └── index.js ├── test │ └── App.test.js ├── webpack.config.js └── yarn.lock Modifying server.js add the following: const Gun = require('gun'); ... app.use(Gun.serve); const server = app.listen(port); Gun({ file: 'db/data.json', web: server }); Please keep in mind that this setup is not meant for production, please refer to the documentation for configuring Amazon S3 and a module that utilizes LevelDB. Earlier I explained how GUN is a distributed database, where it can have many nodes connecting to each other, so we will add the library to the front-end. In the previous step we added the server node for all client nodes to connect to, we’ll pass the URL to it as a configuration for the client stores to synchronize with. For now, we’ll keep the database operations in src/components/App.js: import React, { Component } from 'react'; import Gun from 'gun'; import Home from './Home'; class App extends Component { constructor() { super(); this.gun = Gun(location.origin + '/gun'); window.gun = this.gun; //To have access to gun object in browser console } render() { return ( < Homegun = { this.gun } /> ); } } export default App; To test that this works we’ll run the application. 
In the package.json file there is a section for scripts that can be run and the start command will run the development server: $yarn start Open up a browser window and navigate to to see the home page. In that window open the developer tools. In the console, we will run some commands to interact with the database to see that it works on the client and that it synchronizes with the server peer node. var note = {title: 'first item', text: 'from command line'}; gun.put(note); Inspecting db/data.json in our project we can see there is data similar to this: { "graph": { "EVz9V7xwmMW2MZBGHkwAntex": { "_": { "#": "EVz9V7xwmMW2MZBGHkwAntex", ">": { "title": 1498156296164.74, "text": 1498156296164.74 } }, "title": "first item", "text": "from command line" } } } This can be verified in your localStorage of the browser as well by finding a similar key/value pair: gun/g0ZMK77W4wwVEHuyXzlPdVgc {"_": {"#":"EVz9V7xwmMW2MZBGHkwAntex", ">": {"title":1498156296164.74, "text":1498156296164.74}}, "title": "first item","text":"from command line" } So what exactly happened? We’ve just successfully stored a note into the GUN database with the JSON data from the variable note and some extra data. Compared to original data we can deduce that “_” is a metadata object created by GUN. The document is assigned a unique id ‘#’ or “soul” in GUN parlance and another child object ‘>’ containing the timestamp when a field was last updated. For good measure, let’s open up a new incognito window to the localhost URL to verify that we can access this data. When you inspect the localStorage you will notice that it is empty. This is because this node has not yet retrieved or subscribed to any data. Using the .get() call we go ahead and call it by chaining the .on() function: gun.get('g0ZMK77W4wwVEHuyXzlPdVgc').on(function(data, key){ console.log(data, key); }) And now the same data will show up in the localStorage of this window as seen in our first instance. 
In the documentation, you will see that there are two methods for getting the data by chaining .on() or .val(). With the above example, we’ve subscribed to the data, which will give us real-time updates to the object. .val() is used to only get the data at the time of the call with no future updates. What is important to note here is that if we don’t explicitly make that .get() call in the new node of the application, that node will not know of that key/value pair. Let’s take a step back and consider how to design the data modeling for our sample application. Since we would like to work with a list of notes, we need to consider how that can be stored in GUN. Looking at the documentation we can see that .put() only accepts objects, strings, numbers, booleans, and null. The resulting object will automatically name the object based on its soul, the unique id. A deeper read on the .get() call shows that it can be chained with .put() to set the name of the key: gun.get('key').put({property: 'value'}) gun.get('key').on(function(data, key){ // {property: 'value'}, 'key' }) For the UI framework we will go with the React version of Bootstrap: yarn add react-bootstrap --save I’ve gone ahead and created another component called NoteForm to keep the Home component from being too cluttered. Though this isn’t the cleanest design practice, it serves to help teach us how to use GUN with React. 
NoteForm.js abstracts the UI for the form: import React, { Component } from 'react'; import { Panel, ButtonToolbar, Button, FormGroup, ControlLabel, FormControl } from 'react-bootstrap'; class NoteForm extends Component { componentWillMount() { this.resetState = this.resetState.bind(this); this.resetState(); } componentWillReceiveProps(nextProps) { const { id, title, text } = nextProps.note; this.setState({ id, title, text }); } resetState() { const { id, title, text } = this.props.note; this.setState({ id, title, text }); } onInputChange(event) { let obj = {}; obj[event.target.id] = event.target.value; this.setState(obj); } saveBtnClick() { this.props.onSaveClick(this.state); } render() { return (<Panel bsStyle = "primary"> <form> <FormGroup> <ControlLabel> Title</ControlLabel><FormControl id = "title" type = "text" placeholder = "Enter a title" value = { this.state.title } onChange = { this.onInputChange.bind(this) } /></FormGroup><FormGroup> <ControlLabel> Note text:</ControlLabel><FormControl id = "text" componentClass = "textarea" placeholder = "..." value = { this.state.text } onChange = { this.onInputChange.bind(this) } /></FormGroup><ButtonToolbar> <Button bsStyle = "primary" onClick = { this.saveBtnClick.bind(this) }> Save</Button><Button onClick = { this.resetState }> Cancel</Button></ButtonToolbar></form></Panel> ); } } export default NoteForm; Home.js subscribes to the data and updates it. 
The list rendering is managed here as well: import React, { Component } from 'react'; import { Panel, Button, Col, ListGroup, ListGroupItem } from 'react-bootstrap'; import Gun from 'gun'; import _ from 'lodash'; import NoteForm from './NoteForm'; const newNote = { id: '', title: '', text: '' }; class Home extends Component { constructor({ gun }) { super() this.gun = gun; this.notesRef = gun.get('notes'); this.state = { notes: [], currentId: '' }; } componentWillMount() { let notes = this.state.notes; const self = this; this.gun.get('notes').on((n) => { var idList = _.reduce(n['_']['>'], function(result, value, key) { if (self.state.currentId === '') { self.setState({ currentId: key }); } let data = { id: key, date: value }; self.gun.get(key).on((note, key) => { const merged = _.merge(data, _.pick(note, ['title', 'text'])); const index = _.findIndex(notes, (o) => { return o.id === key }); if (index>= 0) { notes[index] = merged; } else { notes.push(merged); } self.setState({ notes }); }) }, []); }) } newNoteBtnClick() { this.setState({ currentId: '' }); } itemClick(event) { this.setState({ currentId: event.target.id }); } getCurrentNote() { const index = _.findIndex(this.state.notes, (o) => { return o.id === this.state.currentId }); const note = this.state.notes[index] || newNote; return note; } getNoteItem(note) { return (<ListGroupItem key = { note.id } id = { note.id } onClick = { this.itemClick.bind(this) }> { note.title }</ListGroupItem>) } onSaveClick(data) { const note = _.pick(data, ['title', 'text']); if (data.id !== '') { this.gun.get(data.id).put(note); } else { this.notesRef.set(this.gun.put(note)) } } render() { this.getCurrentNote = this.getCurrentNote.bind(this); return (<div> <Col xs = { 4 }> <Panel defaultExpanded <Button bsStyle = "primary" block onClick = { this.newNoteBtnClick.bind(this) }> New Note</Button><ListGroup fill> { this.state.notes.map(this.getNoteItem.bind(this)) }</ListGroup></Panel></Col><Col xs = { 8 }> <NoteForm note = { 
this.getCurrentNote() } onSaveClick = { this.onSaveClick.bind(this) } /></Col></div> ); } } export default Home; The two important functions to observe in Home.js are componentWillMount() and onSaveClick() . When the component is mounted the .on() call subscribes the component first to ‘notes’. The code in the callback is triggered first when initially called and then each subsequent time when a new note is added. As notes is a list of references to actual note objects, additions are the only changes that will happen. Inside the callback, we see a _.reduce() call that goes through each note reference and creates an individual subscription for each note. The callback inside of self.gun.get(key).on((note, key) => { ... }) is triggered when that specific note is updated. onSaveClick() saves the new note or changes to an existing one. When a new note is created this.gun.put(note) returns a reference to the note which is added to the existing set inside of ‘notes’. Open an incognito tab as we did in the beginning, to see how the real-time updates show up in the UI as you add and edit new notes. We’ve created a very simple note taking application that synchronizes the data across all connected peers. From here it would be useful to create a user authentication component and expand on the data model to include security and ownership to lists and individual notes. GUN is an extremely powerful yet minimal database system that can be utilized in a wide variety of scenarios for web and mobile. It is highly recommended to dig deeper into the documentation to learn about more features and the computer science theory behind its design. GUN has a very active and friendly community on Gitter as well that you can reach out to. For access to the completed project code go to this repo. The post Intro to Decentralized Databases with GUN.js appeared first on appendTo.
http://brianyang.com/intro-to-decentralized-databases-with-gun-js/
CC-MAIN-2017-47
refinedweb
1,946
50.84
Template:TR - Purpose This template presents a single template to access sub-pages of major Trainz reference 'Tabs' (divisions) making a proper link to Trainz/Kinds/subpage, Trainz/refs/subpage and Trainz/tags/subpage when given {{{2}}} as the subpage link. Otherwise it connects to each of those parent division pages. - usage This template forms an internal link to the specified Trainz Books refs pages (Wikibook sub-pages or Chapters) (with the case-insensitive controlling parameter {{{1}}} being used to switch which chapter and table of contents is being selected. The first parameter ('1' or {{{1}}}) also varies formatting of the displayed link and how the subpage name parameter {{{2}}} is interpreted during link formation. - In general, if {{{1}}}contains the letter 'D'the template processes {{{2}}}as a 'literal' or 'direct link' case, and otherwise (no 'd' or 'D' in '1')... will append suffixed keywords tag, container, or prefix KIND to the keyword. - These forms correspond directly to the same page names as pages in the TrainzOnline Wiki main namespace—the reference and normally the source page for the EXPANDED material added in the pages here. - Linking externally - The one deviation is we always omit the encapsulating quote marks and link to the N3V wiki to any apropos redirect page title instead -- that is a page without paired quotes in the name... Note the Template: N3V may be used to make a direct link to ANY OF the N3V wiki namespace pages--given a partial url (interim, TOBE CONTINUED), and Template:Plain link will take any url and connect to a web page given a full URL. - The template also handles certain singular or plural suffixing on the pass parameter. Experience shows that if the page exists, the link needs must be the other form. So for example if the page is an important tag keyword, and {{TR|T|tag-name}} is a redlink, try {{TR|S|tagname}} or {{TR|TS... or {{TR|ST... 
}}, each being valid '1' placeholder parameter values handled by the template logic for class tags. For any class data case, link formation may also contain a section title. This is specified by defining one of the explicit parameters 'sect', 's', or 'sect' and these must be lower case. ( 's' != 'S', nor does SECT equate to sect). - Detailed usage - When the {{{1}}} parameter is undefined, the template links to the Trainz/refs page which holds the reference pages Table of Contents. This link is shown above this usage as most of the template becomes irrelevant (vaporware!). - In implementation, to make this template serve as many uses as possible, this style template operates slightly differently than a direct linking template: - Those that append or Prefix a key word (suffix==container while KIND==prefix, and the tags are suffixed either tag or tags) also have a direct mode for passing the full spelling of their string. (See table under, containers, Kinds, and Tags only.) {{{1}}}has these legal values and forms the matching links: - '|CON|C=' selects the page Trainz/containers (TOC) or one of it's sub-pages by building a link [[ Trainz/containers/{{{2}}} container#3| {{{3|{{{2|}}} container}}} ]]? (a space+container added as suffix) • The #3 represents a logical test to see if {{{3}}} pass parameteris defined, and if so, appends it as a section link to a anchor or section title (tags on kind and containers pages have sections detailing their scope and use) as part of the link. • As always with section linking, the {{{3}}} parameter must use underscores for spaces. • For the benefit of those less familiar with template parsing, will form a link to the page addressed. In this example, to the 'References TOC notations' section of Trainz/refs. • {{Col|While not shown again, the {{{3}}} parameter is tested and appended when defined to all links formed by any and all of the following options as well. 
- '|KD|DK= (kind's 'direct mode') selects the page [[Trainz/Kinds/{{{2|}}}#3|{{{3|{{{2|}}}}}} ]] (TOC) or one of it's sub-pages by building a link [[ Trainz/Kinds/KIND {{{2}}}|{{{3| KIND {{{2|}}}}}} ]] • #3 is as advertised... - '|KIND|K|KINDS= selects the page Trainz/Kinds (TOC) or one of it's sub-pages by building a link [[ Trainz/Kinds/KIND {{{2}}}| KIND {{{3|{{{2|}}}}}} ]] - '|L|DIR|DIRECT= selects the page [[Trainz/{{{2}}}|{{{3|{{{2|}}}}}} ]](the Main / TOC page) or one of it's sub-pages in the same way. - Similarly, '|A|AM|AM&C= selects the page [[Trainz/AM&C/{{{2}}}|{{{3|{{{2|}}}}}} ]](the AM&C / TOC page) or one of it's sub-pages in the same way. - Similarly, '|CC= selects the page [[Trainz/content Creation/{{{2}}}|{{{3|{{{2|}}}}}} ]](the Content Creation TOC page) or one of it's sub-pages in the same way. - '|TAG1|T1|T=' selects the page Trainz/tags(Sub-TOC) or one of it's sub-pages by building a link [[ Trainz/tags/{{{2}}} tag#3| {{{3|{{{2|}}} tag}}} ]] - Similarly, '|ST|S|TS|TAGS|T2= selects the page a [[Trainz/tags/{{{2}}}|{{{3|{{{2|}}} tags}}} ]](the same sub-TOC page) or one of it's sub-pages in the same way, but suffix is plural, which names end with 'S'. - Likely Reference 'chapters' (major Sub-TOC's and topic groups) as currently schemed/expected (in mid-May 2014) are: /Appendixes, /tags, /containers, Kinds, /Scenery, /Scripting, /Splines, /Tracks, /Trackside,etc. (partial and conjectural) As of mid-August, only Kinds, tags, and containers have any population pages. - IN ALL Cases, including the below the parameter '|p=' so (p=pretty-name) forms a over-riding 'pretty name' for display of the desired link-to-page of the 'Trainz refs division' of the Trainz Wikibook projects. - It is in fact the case for all Trainz linking templates by design. In-line Examples: - {{TR|DC|queues container}} links as if written: [[Trainz/containers/queues container]]and the actual link and display - generated is: queues containershows this mode. 
- Similarly to link a sub-section: {{TR|DC|queues|custom-attachments}}links as if written: [[Trainz/containers/queues container#custom-attachments|custom-attachments]]and the link - displays only: custom-attachments - But! ... is much easier to edit around on a page when trying to get the best prose presentation.
https://en.wikibooks.org/wiki/Template:TR
CC-MAIN-2016-40
refinedweb
1,033
55.98
Tail Recursive Functions (in Scala) Turning imperative algorithms to tail-recursive functions isn’t necessarily obvious. In this article (and video) I’m showing you the trick you need, and in doing so, we’ll discover the Zen of Functional Programming. Choose between watching the video on YouTube (linked above), or reading the article (below), or both. The Trick # Let’s start with a simple function that calculates the length of a list: def len(l: List[_]): Int = l match { case Nil => 0 case _ :: tail => len(tail) + 1 } It’s a recursive function with a definition that is mathematically correct. However, if we try to test it, this will fail with a StackOverflowError: len(List.fill(100000)(1)) The problem is that the input list is too big. And because the VM still has work to do after that recursive call, needing to do a + 1, the call isn’t in “tail position”, so the call-stack must be used. A StackOverflowError is a memory error, and in this case it’s a correctness issue, because the function will fail on reasonable input. First let’s describe it as a dirty while loop instead: def len(l: List[_]): Int = { var count = 0 var cursor = l while (cursor != Nil) { count += 1 cursor = cursor.tail } count } THE TRICK for turning such functions into tail-recursions is to turn those variables, holding state, into function parameters. def len(l: List[_]): Int = { // Using an inner function to encapsulate this implementation @tailrec def loop(cursor: List[_], count: Int): Int = cursor match { // Our end condition, copied after that while case Nil => count case _ :: tail => // Copying the same logic from that while statement loop(cursor = tail, count = count + 1) } // Go, go, go loop(l, 0) } Now this version is fine. Note the use of the @tailrec annotation — all this annotation does is to make the compiler throw an error in case the function is not actually tail-recursive. That’s because that call is error-prone, and it needs repeating, this is an issue of correctness. 
Let’s do a more complex example to really internalize this. Let’s calculate the N-th number in the Fibonacci sequence — here’s the memory unsafe recursive version: def fib(n: Int): BigInt = if (n <= 0) 0 else if (n == 1) 1 else fib(n - 1) + fib(n - 2) fib(0) // 0 fib(1) // 1 fib(2) // 1 fib(3) // 2 fib(4) // 3 fib(5) // 5 fib(100000) // StackOverflowError (also, really slow) First turn this into a dirty while loop: def fib(n: Int): BigInt = { // Kids, don't do this at home 😅 if (n <= 0) return 0 // Going from 0 to n, instead of vice-versa var a: BigInt = 0 // instead of fib(n - 2) var b: BigInt = 1 // instead of fib(n - 1) var i = n while (i > 1) { val tmp = a a = b b = tmp + b i -= 1 } b } Then turn its 3 variables into function parameters: def fib(n: Int): BigInt = { @tailrec def loop(a: BigInt, b: BigInt, i: Int): BigInt = // first condition if (i <= 0) 0 // end of while loop else if (i == 1) b // logic inside while loop statement else loop(a = b, b = a + b, i = i - 1) loop(0, 1, n) } (Actual) Recursion # Tail-recursions are just loops. But some algorithms are actually recursive, and can’t be described via a while loop that uses constant memory. What makes an algorithm actually recursive is usage of a stack. In imperative programming, for low-level implementations, that’s how you can tell if recursion is required … does it use a manually managed stack or not? But even in such cases we can use a while loop, or a @tailrec function. Doing so has some advantages. 
Let’s start with a Tree data-structure: sealed trait Tree[+A] case class Node[+A](value: A, left: Tree[A], right: Tree[A]) extends Tree[A] case object Empty extends Tree[Nothing] Defining a fold, which we could use to sum-up all values for example, will be challenging: def foldTree[A, R](tree: Tree[A], seed: R)(f: (R, A) => R): R = tree match { case Empty => seed case Node(value, left, right) => // Recursive call for the left child val leftR = foldTree(left, f(seed, value))(f) // Recursive call for the right child foldTree(right, leftR)(f) } This is the simple version. And it should be clear that the size of the call-stack will be directly proportional to the height of the tree. And turning it into a @tailrec version means we need to manually manage a stack: def foldTree[A, R](tree: Tree[A], seed: R)(f: (R, A) => R): R = { @tailrec def loop(stack: List[Tree[A]], state: R): R = stack match { // End condition, nothing left to do case Nil => state // Ignore empty elements case Empty :: tail => loop(tail, state) // Step in our loop case Node(value, left, right) :: tail => // Adds left and right nodes to stack, evolves the state loop(left :: right :: tail, f(state, value)) } // Go, go, go! loop(List(tree), seed) } If you want to internalize this notion — recursion == usage of a stack — a great exercise is the backtracking algorithm. Implement it with recursive functions, or with dirty loops and a manually managed stack, and compare. The plot thickens for backtracking solutions using 2 stacks 🙂 Does this manually managed stack buy us anything? Well yes, if you need such recursive algorithms, such a stack can take up your whole heap memory, which means it can handle a bigger input. But note that with the right input, your process can still blow up, this time with an out-of-memory error (OOM). 
NOTE — in real life, shining examples of algorithms using manually managed stacks are Cats-Effect’s IO and Monix’s Task, since they literally replace the JVM’s call-stack 😄 Zen of Functional Programming? # In FP, you turn variables into (immutable) function parameters. And state gets evolved via function calls 💡 That’s it, that’s all there is to FP (plus the design patterns, and the pain of dealing with I/O 🙂). Enjoy!
https://alexn.org/blog/2021/01/26/tail-recursive-functions-in-scala/
CC-MAIN-2022-21
refinedweb
1,019
59.26
Data Binding is one of the important concept in Windows Phone 8. The Data Binding is generally done with the DataContext property which is part of the Framework Element. Oxygene and WP8 – Binding Data in Windows Phone For example , assume that the Object that needs to be used of Type Employee as shown below. type Employee = public class public property Name: System.String; property Designation: System.String; end; Now , we can create an instance of the Employee Object and assign it to the Framework element(ContentPanel in this example) of the Windows Phone Page. ContentPanel.DataContext := new Employee(Name := 'Senthil Kumar', Designation := 'Senior Software Engineer') The Employee Data is now available to the items within the ContentPanel Grid . To display the data (eg : Name) , the property “Name” of the Employee Object needs to be bound to the FrameworkElement . This can be acheived via the binding markup extension and specifying the path for the binding. For example , to bind the property Name for the TextBox , the code looks similar as shown below. <Grid x: <StackPanel> <TextBox Text="{Binding Path=Name}" /> </StackPanel> </Grid> When you run the application in the Windows Phone device / Emulator , you should see the name displayed in the TextBlock. The DataContext is the Source of the data received from the object . The Path refers to the Name of the property from the Object which needs to be bound to the control. Additionally , the Binding markup extension also includes an attribute mode which includes the values OneTime , OneWay and TwoWay. Download the sourcecode used in the above example here You can also bind the object from code behind. The Steps to bind the data using Oxygene / Code behind requires the Binding Object to interact between the FrameworkElement and the Object (Employee in this example). Once the Binding Object is created , it needs to be set to the TextBox via the SetBinding method as shown below. 
var emp: Employee := new Employee(Name := 'Senthil Kumar', Designation := 'Senior Software Engineer' ); var binding: System.Windows.Data.Binding := new System.Windows.Data.Binding(Source := emp, Path := new PropertyPath('Name'), Mode := System.Windows.Data.BindingMode.OneWay); txtName.SetBinding(TextBox.TextProperty, binding) The XAML code for the TextBox is as shown below. <Grid x: <StackPanel> <TextBox Name="txtName" /> </StackPanel> </Grid> Download the sample Code Snippet for binding the data from code behind here
http://developerpublish.com/oxygene-wp8-binding-data-windows-phone/
CC-MAIN-2017-22
refinedweb
387
54.73
Hide Forgot Programs using threading tend to lock up consistently -- it seems that some threads are either never unblocked, or they never exit. Same code on rhl-7.3 (python2) runs without locking issues. Could this be caused by NPTL? Here is the code I'm using: #!/usr/bin/python2 -tt import threading import socket import random class TestThread(threading.Thread): def __init__(self, ip, semaphore): threading.Thread.__init__(self) self.ip = ip self.semaphore = semaphore def run(self): self.semaphore.acquire() try: host = socket.gethostbyaddr(self.ip) self.host = host[0] except: self.host = self.ip print '%s resolves to %s' % (self.ip, self.host) self.semaphore.release() semaphore = threading.Semaphore(50) threads = [] for i in range(0, 100): a = random.randint(1, 250) b = random.randint(1, 250) ip = '152.3.%d.%d' % (a, b) t = TestThread(ip, semaphore) threads.append(t) t.start() hosts = {} for t in threads: t.join() hosts[t.ip] = t.host print 'Results:' print hosts As a temporary workaround: LD_ASSUME_KERNEL=2.4.19 This fixes the lockup issues. Which makes NPTL highly suspect. :) Thanks for the bug report, I'll run it through the glibc people. Can you please try ? Yes, those do fix the lock-up problem. I'm still running some tests, but so far it's been running smoothly. :D If you are satisfied with the result, can you please close the bug?
https://partner-bugzilla.redhat.com/show_bug.cgi?id=86416
CC-MAIN-2019-43
refinedweb
233
72.83
This Installation . . . . . . . . 7. . . . . . . . . . . 10. . . . . . . . . .4 Locator Strategies . . . . . .1 Introduction . . . . . . . . . . . . . . . . Alerts. . . Selenium-Grid User-Extensions 9. . . . . . . . . . . . 10. . . . . . . . . . . . . . . 7 . . . . . . . . . . . . . .0 and WebDriver 10. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 10 Selenium 2. . . . . . . . . . . . . . . . . . . . . . . . . .. . . . . . . . 7. . . . . . . . . . . . . . . . . . . .2 Actions . .8 Handling Errors . .3 Verifying Results . . . . . . .8 5. . . . 6. . . . . .3 Accessors/Assertions . . . . . . . . . . . . . . . . 6. . . . . . . . . . . .1 Introducing Test Design . . . . . . . . . . . . . . . . . . . . . .10 Specifying the Path to a Specific Browser . . 7. . . . .The Selenese Print Command . . . . . . . . . . . . .3 The 5 Minute Getting Started Guide 10. . . . . . . . . . . . . . 6. . . . . . . . . . . . . . . 9. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . The waitFor Commands in AJAX applications Sequence of Evaluation and Flow Control . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . Store Commands and Selenium Variables . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .5 Programming Your Test . . . . . . . . .12 Handling HTTPS and Security Popups . . . . . . 6. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .6 UI Mapping . . . . . . . .5 WebDriver Implementations . . . . . . . . . . . . . . . 6. . . . . . . . 10. . . . . . . . . . . . . . . . . . . . .7 Bitmap Comparison . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .6 Emulating Selenium RC . . . . . . . . .13 Supporting Additional Browsers and Browser Configurations 6. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .2 How Selenium-RC Works . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .9 5. . . . 
. . . . . . . . . . . . . .8 Adding Some Spice to Your Tests . . . . . . . . . . . . . . . . . . .5 Using User-Extensions With Selenium-IDE 9. . . . . . . . . . . . . . . . . . . . . . 10. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .7 Reporting Results . . . . . .10 6 The “AndWait” Commands . . 6. . . . . . . . . . . . . . . . . . . . . 7. . . . . 6. . . . . . . . . 6. . . . . .4 5. . 9. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .6 Using User-Extensions With Selenium RC . . . . . . . . . . . .4 From Selenese to a Program . . . . . . . . . . . . . . . . . .5 Testing Ajax Applications . . . . . . . . ii . . . . . . . . . . . . . . . . and Multiple Windows . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .5 5. . . . . . . 10. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .6 Learning the API . . . . . . . . . . . . Popups. . .7 5. . . . . . . . . . . . . JavaScript and Selenese Parameters . . . . . . . . . . . .2 When to Use WebDriver? . . . . . . . . 9. . . . . . . . . . . . . . . . . . .6 5. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .14 Troubleshooting Common Problems . . . . . . . . . . . . . . .9 Server Options . .1 Introduction . . . .11 Selenium-RC Architecture . . . . . . . . . . 6. . . . . . . . . . . . . . . . . . . 9. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .4 Choosing a Location Strategy 7. . . . . .7 Tips and Tricks . . . . . 6. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .1 What is WebDriver? . . . . . . . . . . . . . . . . . 7. . echo . . . . . . . . . . . . . . .4 Next Steps For Using WebDriver . . . . . . . . . . 6. .5. . . . . . 7. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 8 9 . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 6. . . . . . . . . . . Test Design Considerations 7. . 
. . . . . . . . . . .2 What to Test? . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 159 14. . . 119 120 121 121 123 11 . Roadmap . . . . . . . . . . . . . . . .11 How XPATH Works in WebDriver Getting and Using WebDriver . . .9 10. . . . . . . . . . . . . . . . . . . . . . . 159 iii .10 10. . . . . . . . . . . . . . . . . Further Resources . . . . . . . . . . . . . . . 143 13 Python Client Driver Configuration 155 14 Locating Techniques 159 14. . . . . . . . . . . . .1 Useful XPATH patterns . .8 10. . . . . . . . . . . . . . . . . . .2 Starting to use CSS instead of XPATH . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .2 Configuring Selenium-RC With Intellij . 127 12. . . . . . . . . . . . .10. . . . .1 Configuring Selenium-RC With Eclipse . . . . . . . . . . . .NET client driver configuration 12 Java Client Driver Configuration 127 12. . . . . . . . . . . . . . . . . . . . . . . . . . . . iv . Selenium Documentation.0 Contents: CONTENTS 1 . Release it will be updated shortly. then manual testing is the best solution. For instance. This ability to use Selenium-RC 6 Chapter 2. These operations are highly flexible.0 .0 Sel-RC interface for backwards compatability. It is relatively recent compared to Selenium’s history and was specifically designed to address some Selenium 1. along with also supporting the Selenium 1. There are times when manual testing may be more appropriate. PHP.4. Selenium 2. Selenium-RC provides an API (Application Programming Interface) and library for each of its supported languages: HTML. If an application has a very tight deadline. Java. there is currently no test automation available.4. allowing many options for locating UI elements and comparing expected test results against actual application behavior. manual testing may be more effective. Stay tuned.0 limitations. 
The entire suite of tools results in a rich set of testing functions specifically geared to the needs of testing of web applications of all types.2 Selenium-RC (Remote Control) Selenium-RC allows the test automation developer to use a programming language for maximum flexibility and extensibility in developing test logic. In addition. Python. and it’s imperative that the testing get done within that time frame. then any automation might need to be rewritten anyway. however learning all the tools will give you many different options for approaching different test automation problems. 2. 2. the programming language’s iteration support can be used to iterate through the result set. Release 1. Also. Introducing Selenium 2. and if the automated test program needs to run tests on each element in the result set. Both the Selenium developers and WebDriver developers agreed that both tools have advantatges and that merging the two projects would make a much more robust automation tool. It supports the WebDriver API and underlying technology. WebDriver is an API (application programming interface) which supports web application test automation just as does Selenium-RC (see next section).0.Selenium Documentation. if the application’s user interface will change considerably in the near future. Selenium WebDriver is the future direction of Selenium and the newest addition to the Selenium tool suite.3 Introducing Selenium Selenium is set of different software tools each with a different approach to supporting test automation.0 is the product of that effort. One of Selenium’s key features is the support for executing one’s tests on multiple browser platforms.0 still supports the Selenium 1. The integration of the WebDriver API is the primary improvement which defined Selenium 2. sometimes there simply is not enough time to build test automation. However the WebDriver API addresses some limitations of the old Sel-RC API. WebDriver originally was a separate project from Selenium. 
Each has a specific role. Most Selenium QA Engineers focus on the one or two tools that most meet the needs of their project.0 It is not always advantageous to automate test cases. Selenium 2. For the short term.4 Selenium’s Tool Suite Selenium is composed of multiple software tools. calling Selenium commands to run tests on each item.1 Selenium-WebDriver This section is under development. For instance. and Ruby.0 technology underneath the WebDriver API for maximum flexibility in developing web-app tests. 2. Perl. if the application under test returns a result set. 2. C#. robust test automation either Sel-WebDriver or Sel-RC should be used with one of the many supported programming languages. Release 1. to be integrated with a project’s automated build environment so the test automatically run each time the project is rebuilt. run tests Windows. run tests Windows. multiple instances of Selenium-RC are running on various operating system and browser configurations. With Selenium-Grid. Supported Browsers 7 .4 Selenium-Grid Selenium-Grid allows the Selenium-RC solution to scale for large test suites or test suites that must be run in multiple environments. SeleniumIDE also offers full editing of test cases for more precision and control. it can only run it’s test scripts against Firefox. Linux. run tests Windows. Mac Firefox 3 Record and playback tests Start browser.Selenium Documentation. 2. 2. Sel-IDE cannot add iteration or condition-statements to a test script._ The Selenium developers recommend for serious.4. When tests are sent to the hub they are then redirected to an available Selenium-RC. Mac Opera 9 Test execution only via Selenium-RC Start browser. Specifically.5. At the time of writing there is no plan to add iteration or conditions to Sel-IDE. since Selenium-IDE is a Firefox plugin. run tests Windows.5 Supported Browsers IMPORTANT: Note this list was for Sel 1. Mac IE 8 Test execution only via Selenium-RC* Start browser. In Addition. 
============= ================================================== =========================== ===================== Browser Selenium-IDE Selenium-RC Operating Systems Firefox 3. run tests Windows Safari 4 Test execution only via Selenium-RC Start browser. that is. run tests Windows IE 7 Test execution only via Selenium-RC* Start browser. run tests Mac Safari 2 Test execution only via Selenium-RC Start browser. It is a Firefox plugin and provides an easy-to-use interface for developing automated tests. Selenium-IDE has a recording feature. which allows the user to pick from a list of assertions and verifications for the selected UI element. with the entire test suite theoretically taking only as long to run as the longest individual test. For running tests using additional browsers you must run Selenium using one of the other tools. Linux. Mac Firefox 2 Record and playback tests Start browser. It requires updating for Sel2. The reasons are partly technical and partly that the Selenium developers wish to encourage best practices in test automation which always requires some amount of programming. which records user actions as they are performed and then stores them as a reusable script that can be played back. run tests Mac Opera 10 Test execution only via Selenium-RC Start browser. _Sel-IDE is simply intended as a rapid prototyping tool. Note that Sel-IDE is not designed to run your test passes nor is it designed to build all the automated tests you will need.x Record and playback tests Start browser. Linux. Mac Opera 8 Test execution only via Selenium-RC Start browser. run tests Mac Safari 3 Test execution only via Selenium-RC Start browser. 2.4. all the while watching the browser’s behavior while the playback occurs. 2. For that you will need a programming language combined with either Sel-WebDriver or Sel-RC.0 with a high-level programming language to develop test cases also allows the automated testing to use continuous integration. 
Each of these when launching register with a hub. Linux. It also has a context menu (right-click) integrated with the Firefox browser. run tests Windows IE 6 Test execution only via Selenium-RC* Start browser. Linux. which will launch the browser and run the test.0–we will do that very soon. run tests Windows.3 Selenium-IDE Selenium-IDE (Integrated Development Environment) is a prototyping tools for building Selenium test scripts. This allows for running tests in parallel.0. Selenium Documentation. Introducing Selenium 2. Selenium Commands Describes a subset of the most useful Selenium commands in detail. Finally. ** Selenium-RC server can start any executable.7 Chapters Described This reference documentation targets both new users of Selenium and those who have been using Selenium and are seeking additional knowledge. We explain how your test script can be “exported” to the programming language of your choice. We do not assume the reader has experience in testing beyond the basics. Mac Google Chrome Test execution only via Selenium-RC (Windows) Start browser. Selenium-RC support for multiple programming and scripting languages allows the test writer to build any logic they need into their automated testing and to use a preferred programming or scripting language of one’s choice. Linux. Selenium’s strongest characteristic when compared with proprietary test automation tools and other open source solutions. verifications and assertions can be made against a web application. It compiles in one place a set of useful Selenium techniques and best practices by drawing from the knowledge of multiple experienced Selenium QA professionals.6 Flexibility and Extensibility You’ll find that Selenium is highly flexible. This allows users to customize the generated code to fit in with their own test frameworks. but depending on browser security settings there may be technical limitations that would limit certain features. 
There are multiple ways in which one can add functionality to Selenium’s framework to customize test automation for one’s specific testing needs. 2. Also provides a general description of Selenium commands and syntax. this section describes some configurations available for extending and customizing how the Selenium-IDE supports test case development. 2. Also. This chapter also describes useful techniques for making your scripts more readable when interpreting defects caught by your Selenium tests. 8 Chapter 2. This chapter shows what types of actions. This section allows you to get a general feel for how Selenium approaches test automation and helps you decide where to begin. Selenium is an Open Source project where code can be modified and enhancements can be submitted for contribution. The experienced Selenium user will also find this reference valuable.0 . Finally.0 run tests Windows. It introduces the novice to Selenium test automation. perhaps. Selenium-IDE allows for the addition of user-defined “user-extensions” for creating additional commands customized to the user’s needs. it is possible to re-configure how the Selenium-IDE generates its Selenium-RC code. This is. Release 1. The remaining chapters of the reference present: Selenium Basics Introduces Selenium by describing how to select the Selenium component most appropriate for your testing tasks. Selenium-IDE Teaches how to build test cases using the Selenium Integrated Development Environment. run tests Windows Others Test execution only via Selenium-RC Partial support possible** As applicable ============= ================================================== =========================== ===================== * Tests developed on Firefox via Selenium-IDE can be executed on any other supported browser via a simple Selenium-RC command line. we must recognize the Selenium Developers. 
We also cover techniques commonly asked about in the user group such as how to implement data-driven tests (tests where one can vary the data between different test passes). They have truly designed an amazing tool. Selenium-Grid This chapter is not yet developed. and to Amit Kumar for participating in our discussions and for assisting with reviewing the document. pop-ups. This includes handling Security Certificates. along with their trade-offs and limitations. and long term involvement in the Selenium community. User extensions Presents all the information required for easily extending Selenium. The various modes.0 Selenium-RC Explains how to develop an automated test program using the Selenium-RC API. Many examples are presented in both a programming language and a scripting language.8. 2. the installation and setup of Selenium-RC is covered here. He also set us up with everything we needed on the seleniumhq. And of course. A number of solutions to problems which are often difficult for the new user are described in this chapter. 2. https requests. Without the vision of the original designers. or configurations.Selenium Documentation. creator of SEl-RC.8. the following people have made significant contributions to the authoring of this user’s guide or with out publishing infrastructure or both. his support was invaluable when writing the original user’s guide. Release 1. As an administrator of the SeleniumHQ website. Test Design Considerations Presents many useful techniques for using Selenium efficiently. Also. Architecture diagrams are provided to help illustrate these points. This includes scripting techniques and programming techniques for use with Selenium-RC. Also thanks goes to Andras Hatvani for his advice on publishing solutions. The Documentation Team–Authors Past and Present 9 . that Selenium-RC supports are described.org website for publishing the documents. 
We cover examples of source code showing how to report defects in the application under test.8 The Documentation Team–Authors Past and Present In alphabetical order. and the opening of new windows. and the continued efforts of the current developers. Patrick helped us understand the our audience. we would not have such a great tool to pass on to you.1 Acknowledgements A huge special thanks goes to Patrick Lightbody. • Dave Hunt • Mary Ann May-Pumphrey • Paul Grandjean • Peter Newhook • Santiago Suarez Ordonez • Tarun Kumar 2. 0 .0 10 Chapter 2. Release 1.Selenium Documentation. Introducing Selenium 2. and many other web-application features. event handling. The Selenium-IDE can serve as an excellent way to train junior-level employees in test automation.2 Introducing Selenium Commands 3. pop up windows. one can test the existence of UI elements based on their HTML tags. the Selenium community is encouraging the use of Selenium-IDE and RC and discouraging the use of Selenium-Core. such as testing each element of a variable length list requires running the script from a programming language. any tests requiring iteration. You can develop your first script in just a few minutes. In addition Selenium commands support testing of window size. mouse position. similar to Selenium-IDE. The command set is often called selenese. Since the development of SeleniumIDE and Selenium-RC. Support for Selenium-Core is becoming less available and it may even be deprecated in a future release. However. This is what we recommend. it does not support iteration. test for broken links. This is the original method for running Selenium commands. selection list options.html. You may also run your scripts from the Selenium-IDE. When programming logic is required Selenium-RC must be used. test for specific content. Selenium-Core also cannot switch between http and https protocols. submitting forms. Selenium-IDE is also very easy to install. 
These commands essentially create a testing language.CHAPTER THREE SELENIUM BASICS 3. 3. and table data among other things. It’s an easy way to get familiar with Selenium commands quickly.2.1 Getting Started – Choosing Your Selenium Tool Most people get started with Selenium-IDE. Ajax functionality. One can run test scripts from a web-browser using the HTML interface TestRunner. Selenium-IDE does not support iteration or condition statements. See the chapter on Selenium-IDE for specifics. In selenese. The Command Reference lists all the available commands. more people are using these tools rather than Selenium-Core. Selenium-Core is another way of running tests.1 Selenium Commands – Selenese Selenium provides a rich set of commands for fully testing your web-app in virtually any way you can imagine. For example. input fields. It’s simple to use and is recommended for lesstechnical users. Finally. alerts. 11 . Some testing tasks are too complex though for the Selenium-IDE. The IDE allows developing and running tests without the need for programming skills as required by Selenium-RC. Anyone who understands how to conduct manual testing of a website can easily transition to using the Selenium-IDE for running and developing tests. At the time of writing (April 09) it is still available and may be convenient for some. It has limitations though. it depends on the command. Parameters vary. “storeTitle”. selenium variables. “clickAndWait”. and in still others the command may take no parameters at all. Selenium commands come in three “flavors”: Actions. Examples include “make sure the page title is X” and “verify that this checkbox is checked”. you can “assertText”. they will fail and halt the test if the condition does not become true within the current timeout setting (see the setTimeout action below). text patterns. 
• a text pattern for verifying or asserting expected page content • a text pattern or a selenium variable for entering text in an input field or for selecting an option from an option list. the execution of the current test is stopped. however they are typically: • a locator for identifying a UI element within a page. For example: verifyText //div//a[2] Login The parameters are not always required. Locators. the test is aborted. 12 Chapter 3. and that Selenium should wait for a new page to load. the test will continue execution. They are also used to automatically generate Assertions. In some cases both are required. “waitFor” commands wait for some condition to become true (which can be useful for testing Ajax applications). This allows a single “assert” to ensure that the application is on the correct page. Accessors and Assertions. followed by a bunch of “verify” assertions to test form field values. For example. They do things like “click this link” and “select that option”. They will succeed immediately if the condition is already true. labels. If an Action fails.g. All Selenium Assertions can be used in 3 modes: “assert”. and the commands themselves are described in considerable detail in the section on Selenium Commands. or has an error. 3.Selenium Documentation. but they verify that the state of the application conforms to what is expected. Many Actions can be called with the “AndWait” suffix. When a “verify” fails. e. However. • Assertions are like Accessors.g. When an “assert” fails.0 A command is what tells Selenium what to do. Release 1.2. in others one parameter is required. e. This suffix tells Selenium that the action will cause the browser to make a call to the server. • Accessors examine the state of the application and store the results in variables. they consist of the command and two parameters. and ” waitFor”. Selenium Basics . “verifyText” and “waitForText”. 
Here are a couple more examples: goBackAndWait verifyTextPresent type type Welcome to My Home Page (555) 666-7066 ${myVariableAddress} id=phone id=address1 The command reference describes the parameter requirements for each command. etc. “verify”. logging the failure. • Actions are commands that generally manipulate the state of the application. This consists of an HTML table with three columns. the second is a target. The first column identifies the Selenium command. Each table row represents a new Selenium command./SaveValues. <html> <head> <title>Test Suite Function Tests .0 Selenium scripts that will be run from Selenium-IDE will be stored in an HTML text file format. NUnit could be employed. The second and third columns may not require values depending on the chosen Selenium command. An HTML table defines a list of tests where each row defines the filesystem path to each test. With a basic knowledge of selenese and Selenium-IDE you can quickly produce and run testcases. but they should be present. and the final column contains a value. test suites also can be defined using a simple HTML file. one after another. If using an interpreted language like Python with Selenium-RC then some simple programming would be involved in setting up a test suite. Commonly Junit is used to maintain a test suite if one is using Selenium-RC with Java. This is done via programming and can be done a number of ways. Test suites can also be maintained when using Selenium-RC. 
Test Suites 13 .3.3 Test Suites A test suite is a collection of tests. from the Selenium-IDE. Here is an example of a test that opens a page.Selenium Documentation. if C# is the chosen language.html" >Login</a></td></tr> <tr><td><a href= ". An example tells it all./Login.html" >Test Save</a></td></tr> </table> </body> </html> A file similar to this would allow running the tests all at once. Additionally. Release. 5 Summary Now that you’ve seen an introduction to Selenium. verifyTitle/assertTitle verifies an expected page title. waitForPageToLoad pauses execution until an expected new page loads. open opens a page using a URL.4 Commonly Used Selenium Commands To conclude our introduction of Selenium. We recommend beginning with the Selenium IDE and its context-sensitive. is present on the page. Called automatically when clickAndWait is used. verifyText verifies expected text and its corresponding HTML tag are present on the page. menu. 14 Chapter 3. This will allow you to get familiar with the most common Selenium commands quickly. you’re ready to start writing your first scripts. verifyElementPresent verifies an expected UI element. and you can have a simple script done in just a minute or two. verifyTextPresent verifies expected text is somewhere on the page. waitForElementPresent pauses execution until an expected UI element. is present on the page. as defined by its HTML tag. as defined by its HTML tag. Chapter 3 gets you started and then guides you through all the features of the Selenium-IDE. These are probably the most commonly used commands for building tests. 3. verifyTable verifies a table’s expected contents. Release 1.Selenium Documentation. and optionally waits for a new page to load. click/clickAndWait performs a click operation. 3. 
2 Installing the IDE Using Firefox.1 Introduction The Selenium-IDE (Integrated Development Environment) is the tool you use to develop your Selenium test cases. It’s an easy-to-use Firefox plug-in and is generally the most efficient way to develop test cases. so you will need to click ‘Allow’ to proceed with the installation.CHAPTER FOUR SELENIUM-IDE 4. This is not only a time-saver. 15 . download the IDE from the SeleniumHQ downloads page Firefox will protect you from installing addons from unfamiliar locations. but also an excellent way of learning Selenium script syntax. as shown in the following screenshot.. 4. first. and when the 16 Chapter 4. The Firefox Add-ons window pops up. Release 1.0 When downloading from Firefox. you’ll be presented with the following window. Select Install Now.Selenium Documentation. first showing a progress bar. Selenium-IDE . After Firefox reboots you will find the Selenium-IDE listed under the Firefox Tools menu.2.0 download is complete. Release 1. displays the following.Selenium Documentation. Installing the IDE 17 . 4. Restart Firefox. 3 Opening the IDE To run the Selenium-IDE.Selenium Documentation. simply select it from the Firefox Tools menu. Release 1.0 4. 18 Chapter 4. or creating new test cases. It opens as follows with an empty script-editing window and a menu for loading. Selenium-IDE . delete. and specify the format (language) used when saving your test cases. is the record button.1 Menu Bar The File menu allows you to create. undo. The Edit menu allows copy. 4. paste. You can set the timeout value for certain commands. add user-defined user extensions to the base set of Selenium commands. open. only one item on this menu–UI-Element Documentation–pertains to Selenium-IDE.Selenium Documentation. Release 1. The Options menu allows the changing of settings. IDE Features 19 . 
and select all operations for editing the commands in your test case.2 Toolbar The toolbar contains buttons for controlling the execution of your test cases. the one with the red-dot.4. The Help menu is the standard Firefox Help menu.4 IDE Features 4. 4.4. and save test case and test suite files.4. including a step feature for debugging your test cases.0 4. The right-most button. including copy. The Source view also allows one to edit the test case in its raw form. 20 Chapter 4. it always goes in the Value field. The TestRunner is not commonly used now and is likely to be deprecated. or a scripting language like Python. Step: Allows you to “step” through a test case by running it one command at a time. The first parameter specified for a command in the Reference tab of the bottom pane always goes in the Target field. Pause/Resume: Allows stopping and re-starting of a running test case. Target. TestRunner Mode: Allows you to run the test case in a browser loaded with the Selenium-Core TestRunner. The other tab . Most users will probably not need this button.4. Run All: Runs the entire test suite when a test suite with multiple test cases is loaded. It has two tabs. Release 1. See the Options menu for details.3 Test Case Pane Your script is displayed in the test case pane. If a second parameter is specified by the Reference tab. The Command. this is HTML although it can be changed to a programming language such as Java or C#. 4. By default. These are entry fields where you can modify the currently selected command. one for displaying the command and their parameters in a readable “table” format. cut and paste operations. Detailed documentation on rollup rules can be found in the UI-Element Documentation on the Help menu. Record: Records the user’s browser actions. Run: Runs the currently selected test.0 Speed Control: controls how fast your test case runs.Source displays the test case in the native format in which the file will be stored. 
Selenium-IDE .Selenium Documentation. When only a single test is loaded this button and the Run All button have the same effect. Apply Rollup Rules: This advanced feature allows repetitive sequences of Selenium commands to be grouped into a single action. This button is for evaluating test cases for backwards compatibility with the TestRunner. Use for debugging test cases. and Value entry fields display the currently selected command along with its parameters. the Reference pane will display documentation on the current command. you can then select your desired command from the drop-down.4. the command will not run correctly. whether from Table or Source mode.Selenium Documentation. While the Reference tab is invaluable as a quick reference. Notice the Clear button for clearing the Log.4 Log/Reference/UI-Element/Rollup Pane The bottom pane is used for four different functions–Log. 4. even if you do not first select the Log tab. When entering or modifying commands. and the type of parameters provided must match the type specified. Release 1.4. The number of parameters provided must match the number specified. Log When you run your test case. it is critically important to ensure that the parameters specified in the Target and Value fields match those specified in the parameter list in the Reference pane. Reference. In Table mode. the order of parameters provided must match the order specified. error messages and information messages showing the progress are displayed in this pane automatically. it is still often necessary to consult the Selenium Reference document. Reference The Reference tab is the default selection whenever you are entering or modifying Selenese commands and parameters in Table mode. These messages are often useful for test case debugging. Also notice the Info button is a drop-down allowing selection of different levels of information to log. UI-Element. If there is a mismatch in any of these three areas. 
a drop-down list will be populated based on the first characters you type. and Rollup– depending on which tab is selected.0 If you start typing in the Command field. IDE Features 21 . 4. 5. The first time you use Selenium. This will cause unexpected test case failures.click command Here are some “gotchas” to be aware of: • The type command may require clicking on some other area of the web page for it to record.5.0 UI-Element and Rollup Detailed information on these two panes (which cover advanced features) can be found in the UIElement Documentation on the Help menu of Selenium-IDE. right-click the selected text. and deselecting “Start recording immediately on open. 4. Frequently. Open a web-page of your choosing and select a block of text on the page. the record button is ON by default. The context menu should give you a verifyTextPresent command and the suggested parameter should be the text itself. along with the parameters. We won’t describe the specifics of these commands here. If you do not want Selenium-IDE to begin recording automatically you can turn this off by going under Options > Options. Selenium-IDE .select command • clicking checkboxes or radio buttons . You will often need to change this to clickAndWait to ensure your test case pauses until the new page is completely loaded. With Selenium-IDE recording..Selenium Documentation. 4.. When Selenium-IDE is first opened. Release 1.5 Building Test Cases There are three primary methods for developing test cases. you will find additional commands will quickly be added to this menu. Selenium-IDE will attempt to predict what command. As you use the IDE however.” During recording. A paragraph or a heading will work fine. 22 Chapter 4. your test case will continue running commands before the page has loaded all its UI elements. 4. Now. Typically. go to the browser displaying your test application and right click anywhere on the page. 
Let’s see how this works.type command • selecting options from a drop-down listbox . This requires assert and verify commands. this will include: • clicking a link . Here we’ll simply describe how to add them to your test case. you will need for a selected UI element on the current web-page.click or clickAndWait commands • entering values . there may only be one Selenium command listed.2 Adding Verifications and Asserts With the Context Menu Your test cases will also need to check the properties of a web-page. Otherwise.1 Recording Many first-time users begin by recording a test case from their interactions with a website. that is in the chapter on “Selenese” Selenium Commands. • Following a link usually records a click command. Selenium-IDE will automatically insert commands into your test case based on your actions. You will see a context menu showing verify and/or assert commands. a test developer will require all three techniques. the IDE will add a blank line just ahead of the line you selected.5. if one is required). You may need to use Show All Available Commands to see options other than verifyTextPresent. an empty comment won’t. in the Test Case Pane. left-click between the commands where you want to insert a new command. Release 1.0 Also. i. selecting verifyElementPresent for an image should later cause that command to be available on the primary context menu the next time you select an image and right-click. Try right-clicking an image. the more commonly used ones will show up on the primary context menu. feel free to use the IDE to record and select commands into a test case and then run it. To do this. An empty command will cause an error during execution. many more commands. Add an HTML-style comment. 4. left-click on the line where you want to insert a new command. This shows many. Once you select these other options. notice the Show All Available Commands menu option. 
Now use the command editing text fields to enter your new command and its parameters. again. Your comment will appear in purple font. To do this. first parameter (if one is required by the Command). just create empty comments. Building Test Cases 23 .Selenium Documentation.e. and second parameter (again. Try a few more UI elements. for testing your currently selected UI element. Table View Select the line in your test case where you want to insert the comment. and enter the HTML tags needed to create a 3-column row containing the Command. Source View Select the point in your test case where you want to insert the command. Insert Comment Comments may be added to make your test case more readable. These comments are ignored when the test case is run. Now use the Command field to enter the comment. For now though. You can learn a lot about the Selenium commands simply by experimenting with the IDE. <!-. Again. along with suggested parameters. Right-click and select Insert Command. Comments may also be used to add vertical white space (one or more blank lines) in your tests. Right-click and select Insert Comment.your comment here -->.. For example. Be sure to save your test before switching back to Table view. or a user control like a button or a checkbox. 4.3 Editing Insert Command Table View Select the point in your test case where you want to insert the command.5. these commands will be explained in detail in the chapter on Selenium commands. Source View Select the point in your test case where you want to insert the comment. in the Test Case Pane. portal. Run Any Single Command Double-click any single command to run it by itself. This is useful for debugging your test case. right-click. When you open an existing test case or suite. run it one line at a time. To set a startpoint. Run a Test Case Click the Run button to run the currently displayed test case. To set a breakpoint. If the test cases of your test suite have not been saved. 
and you can do a batch run of an entire test suite. and from the context menu select Toggle Breakpoint. Execution of test cases is very flexible in the IDE. You can double-click it to see if it runs correctly. To continue click Resume. Target. stop and start it. Run a Test Suite Click the Run All button to run all the test cases in the currently loaded test suite. You can run a test case all at once. This also is used for debugging. This is useful when writing a single command. Suppose that a site named. Selenium-IDE displays its Selenium commands in the Test Case Pane.4 Opening and Saving a Test Case Like most programs. you’ll be prompted to save them before saving the test suite. Source View Since Source view provides the equivalent of a WYSIWYG editor. when you are not sure if it is correct. or comment. This is also available from the context menu.7 Using Base URL to Run Test Cases in Different Domains The Base URL field at the top of the Selenium-IDE window is very useful for allowing test cases to be run across different domains.6 Running Test Cases The IDE allows many options for running your test case. or save the test suite. run a single command you are currently developing. Stop in the Middle You can set a breakpoint in the test case to cause it to stop on a particular command. simply modify which line you wish– command. It lets you immediately test a command you are constructing. select a command. The icon of this button then changes to indicate the Resume button. Release 1.Selenium Documentation. Selenium-IDE . 4.5. Stop and Start The Pause button can be used to stop the test case while it is running. and Value fields. 4. parameter. To save your Selenium-IDE tests for later use you can either save the individual test cases. However. right-click. 4. 
Start from the Middle You can tell the IDE to begin running from a specific command in the middle of the test case.0 Edit a Command or Comment Table View Simply select the line to be changed and edit it using the Command. select a command. there are Save and Open commands under the File menu.com had an in-house beta 24 Chapter 4. Selenium distinguishes between test cases and test suites. and from the context menu select Set/Clear Start Point. It is also sometimes useful to run a test case from somewhere in the middle to the end of the test case or up to a breakpoint that follows the starting point. set a breakpoint on the command just before the one to be examined.1 Breakpoints and Startpoints The Sel-IDE supports the setting of breakpoints and the ability to start and stop the running of a test case. suppose your test case first logs into the website and then performs a series of tests and you are trying to debug one of those tests. select a command. from any point within the test case. Debugging 25 .com.com/about. To set a breakpoint. the test case below would be run against. Any test cases for these sites that begin with an open statement should specify a relative URL as the argument to open rather than an absolute URL (one starting with a protocol such as http: or https:).com/about. Then click the Run button to run your test case from the beginning up to the breakpoint. To do this.portal. For example. That is.8. For example. right-click. If this is new to you. we recommend you ask one of the developers in your organization.8. We won’t teach debugging here as most new users to Selenium will already have some basic experience with debugging.Selenium Documentation. Release 1. one can run up to a specific command in the middle of the test case and inspect how the test case behaves at that point. you 4.portal.news.portal. and from the context menu select Toggle Breakpoint. 
Selenium-IDE will then create an absolute URL by appending the open command’s argument onto the end of the value of Base URL.news. However. This is a normal part of test case development.8 Debugging Debugging means finding and fixing errors in your test case. 4.html: Base URL setting would be run against 4.html: This same test case with a modified site named. among others. clickAndWait. 4. it stores additional information which allows the user to view other possible locator-type arguments that could be used instead.5 Locator Assistance Whenever Selenium-IDE records a locator-type argument. To set a startpoint. It can be used with any command that identifies a UI element on a webpage. and certain assert and verify commands.3 Find Button The Find button is used to see which UI element on the currently displayed webpage (in the browser) is used in the currently selected Selenium command. select just that portion of the webpage for which you want to see the source. Then click the Run button to execute the test case beginning at that startpoint. Use its Search feature (Edit=>Find) to search for a keyword to find the HTML for the UI element you’re trying to test. with highlighting on the portion representing your selection.8. That will prevent you from having to manually logout each time you rerun your test case. This feature can be 26 Chapter 4. From Table view. 4. Now look on the webpage: There should be a bright green rectangle enclosing the element specified by the locator parameter. Simply right-click the webpage and select ‘View->Page Source. select any command that has a locator parameter. right-click.0 only need to login once.8. Firefox makes this easy.4 Page Source for Debugging Often.8. click. then run your test case from a startpoint placed after the login portion of your test case. and from the context menu select Set/Clear Start Point.Selenium Documentation.2 Stepping Through a Testcase To execute a test case one command at a time (“step through” it). 
you simply must look at the page source (the HTML for the webpage you’re trying to test) to determine a problem.8. Then rightclick the webpage and select View Selection Source. Repeatedly select the Step button.e. Click the Find button. The HTML opens in a separate window. 4. select a command. Start the test case running with the Run button from the toolbar. when debugging a test case. Selenium-IDE . Release 1. 1. 4. In this case. type. the separate HTML window will contain just a small amount of source. i. follow these steps: 1. 1. Immediately pause the executing test case with the Pause button. You can login once. This is useful when building a locator for a command’s first parameter (see the section on locators in the Selenium Commands chapter). but you need to keep rerunning your tests as you are developing them. Alternatively. Note that the first column of the drop-down provides alternative locators. Release 1. A test suite file is an HTML file containing a one-column table. The test suite pane will be automatically opened when an existing test suite is opened or when the user selects the New Test Case item from the File menu. This locator assistance is presented on the Selenium-IDE window as a drop-down list accessible at the right end of the Target field (only when the Target field contains a recorded locator-type argument).9 Writing a Test Suite A test suite is a collection of test cases which is displayed in the leftmost pane in the IDE. 4. whereas the second column indicates the type of each alternative.9. Below is a snapshot showing the contents of this drop-down for one command. Each cell of each row in the <tbody> section contains a link to a test case. Users who want to create or modify a test suite by adding pre-existing test cases must manually edit a test suite file. In the latter case. the new test case will appear immediately below the previous test case.Selenium Documentation. The example below is of a test suite containing four test cases: 4. 
and is often needed to help one build a different type of locator than the type that was recorded. Writing a Test Suite 27 . Selenium-IDE does not yet support loading pre-existing test cases into a test suite.0 very useful for learning more about locators. The test suite pane can be manually opened or closed via selecting a small dot halfway down the right edge of the pane (which is the left edge of the entire Selenium-IDE window if the pane is closed). There are a number of useful extensions created by users.js. look at the page created by its author.0 <html> <head> <meta http-D Links</a></td></tr> </tbody> </table> </body> </html> Note: Test case files should not have to be co-located with the test suite file that invokes them.html" >A Links</a></td></tr> <tr><td><a href= ". 28 Chapter 4./a.html" >C Links</a></td></tr> <tr><td><a href= ". Selenium-IDE . For an example of how to use the functionality provided by this extension./c./b. 4./d. this feature is used to translate your test case into a programming language. Essentially. tests developed with Selenium-IDE can be run against other browsers. This topic is covered in the Run Selenese tests section on Selenium-RC chapter.11. Information on writing your own extensions can be found near the bottom of the Selenium Reference document. 4. you must close and reopen Selenium-IDE in order for the extensions file to be read. under the Options menu. you can alter it by editing a configuration file which defines the generation process.13 Troubleshooting Below is a list of image/explanation pairs which describe frequent sources of problems with SeleniumIDE: Table view is not available with this format. An enhancement request has been raised to improve this error message. error loading test case: no command found You’ve used File=>Open to try to open a test suite file. Release 1. Each supported language has configuration settings which are editable. 
However the author has altered the C# format in a limited manner and it has worked well. See issue 1010. note that if the generated code does not suit your needs. The workaround is to close and reopen Selenium IDE. 4. i. If you will be using Selenium-RC to run your test cases. Format 29 . you will be using with Selenium-RC for developing your test programs. The -htmlSuite command-line option is the particular feature of interest. for more information. The default is HTML. allows you to select a language for saving and displaying the test case.11 Format Format. If you are able to reproduce this reliably then please provide details so that we can work on a fix. program code supporting your test is generated for you by Selenium-IDE. See issue 1008. Java. Note: At the time of this writing. Also. Your test case will be translated into a series of functions in the language you choose. This is under the Options=>Options=>Format tab.Selenium Documentation. Any change you make to an extension will also require you to close and reopen SeleniumIDE. using a simple command-line interface that invokes the Selenium-RC server.0 After selecting the OK button. 4.12 Executing Selenium-IDE Tests on Different Browsers While Selenium-IDE can only run tests against Firefox. Then simply save the test case using File=>Save. this feature is not yet supported by the Selenium developers. PHP.e. Use File=>Open Test Suite instead. 4. Select the language. This message can be occasionally displayed in the Table tab when Selenium IDE is launched. it indicates that you haven’t actually created the variable whose value you’re trying to access. “Component returned failure code: 0x80520012 (NS_ERROR_FILE_NOT_FOUND) [nsIFileInputStream. If so. Make sure that the test case is indeed located where the test suite indicates it is located. the two parameters for the store command have been erroneously placed in the reverse order of what is required.. 
Whenever your attempt to use variable substitution fails as is the case for the open command above. Selenium-IDE . investigate using an appropriate waitFor* or *AndWait command before the failing command.js :: anonymous :: line 48” data: no] One of the test cases in your test suite cannot be found. This is sometimes due to putting the variable in the Value field when it should be in the Target field or vice versa. 30 Chapter 4. For any Selenese command.Selenium Documentation. See issue 1011. Selenium-IDE is very space-sensitive! An extra space before or after a command will cause it to be unrecognizable.0 This type of error may indicate a timing problem. error loading test case: [Exception.e. the first required parameter must go in the Target field. make sure that your actual test case files have the . i. the element specified by a locator in your command wasn’t fully loaded when the command was executed. Try putting a pause 5000 before the command to determine whether the problem is indeed related to timing.. Release 1. and in the test suite file where they are referenced.init]” nresult: “0x80520012 (NS_ERROR_FILE_NOT_FOUND)” location: “JS frame :: chrome://selenium-ide/content/fileutils..html extension both in their filenames. and the second required parameter (if one exists) must go in the Value field. In the example above. Also. An enhancement request has been raised to improve this error message. Thus.13. Troubleshooting 31 . Selenium-IDE is correct to generate an error. This type of error message makes it appear that Selenium-IDE has generated a failure where there is none. However. Your extension file’s contents have not been read by Selenium-IDE.0 This defect has been raised. which is confusing. See issue 1013. This defect has been raised. but is misleading in the nature of the error. Also. The problem is that the log file error messages collapse a series of two or more spaces into a single space. 
Selenium-IDE must be restarted after any change to either an extensions file or to the contents of the Selenium Core extensions field. 4. See issue 1012. Release 1. note that the parameter for verifyTitle has two spaces between the words “Selenium” and “web” The page’s actual title has only one space between these words. Be sure you have specified the proper pathname to the extensions file via Options=>Options=>General in the Selenium Core extensions field.Selenium Documentation. Selenium-IDE is correct that the actual value does not match the value specified in such test cases. In the example above. Selenium-IDE .0 32 Chapter 4.Selenium Documentation. Release 1. and start each group with an “assert” followed by one or more “verify” test commands. and the web designers frequently change the specific image file along with its position on the page.1 Verifying Page Elements Verifying UI elements on a web page is probably the most common feature of your automated tests. if you are testing a text heading. will you test that. The best use of this feature is to logically group your test commands. however. There’s very little point checking that the first paragraph on the page is the correct one if your test has already failed when checking that the browser is displaying the expected page. are the set of commands that run your tests. Selenese allows multiple ways of checking for UI elements.1 Assertion or Verification? Choosing between “assert” and “verify” comes down to convenience and management of failures. 5. an element is present somewhere on the page? 2.. A sequence of these commands is a test script. If you’re not on the correct page. Here we explain those commands in detail. 5. the text and its position at the top of the page are probably relevant for your test. then you only want to test that an image (as opposed to the specific image file) exists somewhere on the page. If. An example follows: 33 . For example. 
you’ll probably want to abort your test case so that you can investigate the cause and fix the issue(s) promptly.CHAPTER FIVE SELENIUM COMMANDS Selenium commands. often called selenese. you are testing for the existence of an image on the home page. On the other hand. It is important that you understand these different methods because these methods define what you are actually testing. and we present the many choices you have in testing your web application when using Selenium. Effectively an “assert” will fail the test and abort the current test case. whereas a “verify” will fail the test and continue to run the test case.. specific text is somewhere on the page? 3.1.. 1. 1 1.2 1. The first (and only) parameter is a locator for telling the Selenese command how to find the element. Do not use this when you also need to test where the text occurs on the page. locators are explained in the next section. and that it follows a <div> tag and a <p> tag.2. verifyElementPresent can be used to check the existence of any HTML tag within the page. 34 Chapter 5. rather then its content. only the HTML tag. Only if this passes will the following command run and “verify” that the text is present in the expected location.1. and only if this passed will the remaining cells in that row be “verified”. that the text string “Marketing Analysis” appears somewhere on the page currently being tested.2. Use verifyTextPresent when you are interested in only the text itself being present on the page. is present on the page.3.1. Here are a few more examples. You can check the existence of links.. One common use is to check for the presence of an image. Again. 5.Selenium Documentation. For example: Command verifyTextPresent Target Marketing Analysis Value This would cause Selenium to search for.3 Value Downloads Selenium IDE June 3. divisions <div>. Command verifyElementPresent Target //div/p/img Value This command verifies that an image. and verify. Selenium Commands . 
The test case then “asserts” the first column in the second row of the first table contains the expected value.0 Command open assertTitle verifyText assertTable verifyTable verifyTable Target /download/ Downloads //h2 1. etc. 2008 1. This verification does not check the text. It takes a single argument–the text pattern to be verified.2 verifyTextPresent The command verifyTextPresent is used to verify specific text exists somewhere on the page. paragraphs. specified by the existence of an <img> HTML tag. 5. Release 1.2. Locators are explained in the next section. 4 verifyText Use verifyText when both the text and its UI element must be tested. the first element with the id attribute value matching the location will be used. 5.2 Locating Elements For many Selenium commands. Use this when you know an element’s id attribute.1 Locating by Identifier This is probably the most common method of locating elements and is the catch-all default when no recognised locator type is used. 5. and consists of the location strategy followed by the location in the format locatorType=location. If no element has a matching id attribute. you can verify that specific text appears at a specific location on the page relative to other UI components on the page. but also more explicit.Selenium Documentation. Locating Elements 35 . If you choose an XPath or DOM locator. 5.2. With this strategy. 5. This target identifies an element in the content of the web application.2 Locating by Id This type of locator is more limited than the identifier locator type.2. verifyText must use a locator. Command verifyText Target //table/tr/td/div/p Value This is my text and it occurs right after the div inside the table. Release 1.0 5. a target is required. then the first element with a name attribute matching the location will be used.1. The locator type can be omitted in many cases. For instance. The various locator types are explained below with examples for each.2. 
You may or may not want to also test whether the page structure changes. As HTML can be an implementation of XML (XHTML). Selenium users can leverage this powerful language to target elements in their web applications. XPath extends beyond (as well as supporting) the simple methods of locating by id 36 Chapter 5. becomes very important. 5. Selenium Commands .2.. In the case where web designers frequently alter the page. If multiple elements have the same value for a name attribute.4 Locating by XPath XPath is the language used for locating nodes in an XML document. The default filter type is value (matching the value attribute). then you can use filters to further refine your location strategy. the test will still pass. So if the page structure and organization is altered. testing via id and name attributes. but its functionality must be regression tested.Selenium Documentation. the three types of locators above allow Selenium to test a UI element independent of its location on the page.3 Locating by Name The name locator type will locate the first element with a matching name attribute. or really via any HTML property. Absolute path (would break if the HTML was changed only slightly) • //form[1] (3) . Since only xpath locators start with “//”.0 or name attributes. and opens up all sorts of new possibilities such as locating the third checkbox on the page. but in order to learn more.First input child element of the form element with attribute named ‘id’ and the value ‘loginForm’ • //input[@name=’continue’][@type=’button’] (7) .) .2.First input element with attribute named ‘name’ and the value ‘username’ • //form[@id=’loginForm’]/input[1] (4) . This is much less likely to change and can make your tests more robust. it is not necessary to include the xpath= label when specifying an XPath locator.Input with attribute named ‘name’ and the value ‘continue’ and attribute named ‘type’ and the value ‘button’ • //form[@id=’loginForm’]/input[4] (7) . 
the following references are recommended: • W3Schools XPath Tutorial • W3C XPath Recommendation 5. Absolute XPaths contain the location of all elements from the root (html) and as a result are likely to fail with only the slightest adjustment to the application. Release 1. or relative to an element that does have an id or name attribute. One of the main reasons for using XPath is when you don’t have a suitable id or name attribute for the element you wish to locate. You can use XPath to either locate the element in absolute terms (not advised). By finding a nearby element with an id or name attribute (ideally a parent element) you can locate your target element based on the relationship.Fourth input child element of the form element with attribute named ‘id’ and value ‘loginForm’ These examples cover some basics. Locating Elements 37 .Selenium Documentation.First form element with an input child element with attribute named ‘name’ and the value ‘username’ • //input[@name=’username’] (4) .The form element with attribute named ‘id’ and the value ‘loginForm’ • xpath=//form[input/\@name=’username’] (4) . XPath locators can also be used to specify elements via attributes other than id and name.First form element in the HTML • xpath=//form[@id=’loginForm’] (3) . html" >Cancel</a> </body> <html> • link=Continue (4) • link=Cancel (5) 5.forms[’loginForm’] (3) • dom=document.0 • XPath Tutorial . 5.forms[0] (3) 38 Chapter 5.6 Locating by DOM The Document Object Model represents an HTML document and can be accessed using JavaScript. There are also a couple of very useful Firefox Add-ons that can assist in discovering the XPath of an element: • XPath Checker . Release 1.suggests XPath and can be used to test XPath results. • Firebug . Selenium Commands .with interactive examples.2. 
1 2 3 4 5 6 7 8 9 10 <html> <body> <form id= "loginForm" > <input name= "username" <input name= "password" <input name= "continue" <input name= "continue" </form> </body> <html> type= type= type= "password" /> "submit" value= "Login" /> "button" value= "Clear" /> • dom=document. If two links with the same text are present. which can be simply the element’s location using the hierarchical dotted notation.html" >Continue</a> <a href= "cancel. This location strategy takes JavaScript that evaluates to an element on the page.5 Locating Hyperlinks by Link Text This is a simple method of locating a hyperlink in your web page by using the text of the link. then the first match will be used.Selenium Documentation.2.getElementById(’loginForm’) (3) • dom=document.XPath suggestions are just one of the many powerful features of this very useful add-on. Since only dom locators start with “document”. 1 2 3 4 5 6 7 <html> <body> <p>Are you sure you want to do this?</p> <a href= "continue. it is not necessary to include the dom= label when specifying a DOM locator.].2.2. 5.elements[0] (4) • document.2.forms[0]. 5. 5. A good reference exists on W3Schools. You’ll find additional references there. These Selectors can be used by Selenium as another locating strategy.elements[’username’] (4) • document. the best place to go is the W3C publication.8 Implicit Locators You can choose to omit the locator type in the following situations: • Locators without an explicitly defined locator strategy will default to using the identifier locator strategy.forms[0].Selenium Documentation. CSS uses Selectors for binding style properties to elements in the document. Release 1. Locating Elements 39 .passfield (5) • css=#loginForm input[type="button"] (4) • css=#loginForm input:nth-child(2) (5) For more information about CSS Selectors.required[type="text"] (4) • css=input.username (4) • document. 
See Locating by Identifier.elements[3] (7) You can use Selenium itself as well as other sites and extensions to explore the DOM of your web application.0 • document. Note: Most experienced Selenium users recommend CSS as their locating strategy of choice as it’s considerably faster than XPath and can find the most complicated objects in an intrinsic HTML document.7 Locating by CSS CSS (Cascading Style Sheets) is a language for describing the rendering of HTML and XML documents.forms[0]. The glob pattern’s asterisk will match “anything or nothing” between the word “Film” and the word “Television”. verifyText. Command click verifyTitle Target link=glob:Film*Television Department glob:*Film*Television* Value The actual title of the page reached by clicking on the link was “De Anza Film And Television Department . or many characters. 5. if the page’s owner should shorten the title to just “Film & Television Department. Examples of commands which require patterns are verifyTextPresent. verifyAlert. what text is expected rather than having to specify that text exactly.e. See Locating by DOM 5. To specify a globbing pattern parameter for a Selenese command. By using a pattern rather than the exact text. patterns are a type of parameter frequently required by Selenese commands.0 • Locators starting with “//” will use the XPath locator strategy. and exact. However. verifyTitle. In this case. assertConfirmation. by using a pattern rather than the exact text. the click command will work even if the link text is changed to “Film & Television Department” or “Film and Television Department”. and verifyPrompt. globbing includes a third special character. However. globbing is used to display all the files ending with a .” the test would still pass. There are three types of patterns: globbing.” i. nothing. link locators can utilize a pattern.” A dash (hyphen) can be used as a shorthand to specify a range of characters (which are contiguous in the ASCII character set). 
because globbing patterns are the default. Using a pattern for both a link and a simple test that the link worked (such as the verifyTitle above does) can greatly reduce the maintenance for such test cases. the verifyTitle will pass as long as the two words “Film” and “Television” appear (in that order) anywhere in the page’s title.c.Menu”. via the use of special characters.. 40 Chapter 5. regular expressions. Below is an example of two commands that use globbing patterns. Selenium Commands . a single character.1 Globbing Patterns Most people are familiar with globbing as it is utilized in filename expansion at a DOS or Unix/Linux command line such as ls *. Release 1. you can prefix the pattern with a glob: label. Only two special characters are supported in the Selenium implementation: * which translates to “match anything. you can also omit the label and specify just the pattern itself.Selenium Documentation. • Locators starting with “document” will use the DOM locator strategy. [ ] (character class) which translates to “match any single character found inside the square brackets. For example. Patterns allow you to describe. Selenium globbing patterns only support the asterisk and character class.”.3. Globbing is fairly limited. And as has been mentioned above. See Locating by XPath.c extension that exist in the current directory.3 Matching Text Patterns Like locators. the ?. Matching Text Patterns 41 . The more complex example below tests that the Yahoo! Weather page for Anchorage. Whereas Selenese globbing patterns support only the * and [ ] (character class) features.2}:[0-9]{2} [ap]m Value Let’s examine the regular expression above one part at a time: Sunrise: * [0-9]{1.html regexp:Sunrise: *[0-9]{1.com/forecast/USAK0012. Below are a subset of those special characters: PATTERN . and a host of tools.” It is the equivalent of the one-character globbing pattern * (a single asterisk). including the Linux/Unix command-line utilities grep. 
regular expression patterns allow a user to perform many tasks that would be very difficult otherwise. many text editors. and awk. The only differences are the prefix (regexp: instead of glob:) and the “anything or nothing” pattern (. In Selenese.* (“dot star”). sed. Release 1. [] * + ? {1. For example. regexp: [0-9]+ is a simple pattern that will match a decimal number of any length.0 5. This two-character sequence can be translated as “0 or more occurrences of any character” or more simply. The first one uses what is probably the most commonly used regular expression pattern–.2 Regular Expression Patterns Regular expression patterns are the most powerful of the three types of patterns that Selenese supports. A few examples will help clarify how regular expression patterns can be used with Selenese commands. The former is case-sensitive. the latter is case-insensitive.yahoo.* instead of just *).*Television Department regexp:.3. Regular expressions are also supported by most high-level programming languages. Command click verifyTitle Target link=regexp:Film. Selenese regular expression patterns offer the same wide array of special characters that exist in JavaScript. Alaska contains info on the sunrise time: Command open verifyTextPresent Target.*Film.Selenium Documentation. “anything or nothing.*Television. suppose your test needed to ensure that a particular table cell contained nothing but a number.3.* Value The example above is functionally equivalent to the earlier example that used globbing patterns for this same test. ” it would be the option selected rather than the “Real *” option. clickAndWait) tells Selenium to wait for the page to load after the action has been done. 5.Selenium Documentation. if you needed to look for an actual asterisk character (which is special for both globbing and regular expression patterns). it simply runs in sequence. while the AndWait alternative (e. the following code might work or it might not. 
as waitForElementPresent or waitForVisible.). globbing patterns and regular expression patterns are sufficient for the vast majority of us.5 The waitFor Commands in AJAX applications In AJAX driven web applications. Selenium Commands . For example. Using andWait commands will not work as the page is not actually refreshed. It uses no special characters at all. your test will fail. the exact pattern would be one way to do that. leading to test failures. So. one command after another. checking for the desired condition every second and continuing to the next command in the script as soon as the condition is met. The AndWait alternative is always used when the action causes the browser to navigate to another page or reload the present one.3 Exact Patterns The exact type of Selenium pattern is of marginal usefulness. The asterisk in the glob:Real * pattern will match anything or nothing. So. Pausing the test execution for a certain period of time is also not a good approach as web element might appear later or earlier than the stipulated period depending on the system’s responsiveness. 5. click) will do the action and continue with the following command as fast as it can. 42 Chapter 5. select //select glob:Real * In order to ensure that the “Real *” item would be selected.3. Thus.0 5.6 Sequence of Evaluation and Flow Control When a script runs. Be aware. if you wanted to select an item labeled “Real *” from a dropdown. data is retrieved from server without refreshing the page. This happens because Selenium will reach the AndWait‘s timeout without seeing any navigation or refresh being made. This is done using waitFor commands.4 The “AndWait” Commands The difference between a command and its AndWait alternative is that the regular command (e. which wait dynamically. Release 1. if you use an AndWait command for an action that does not trigger a navigation/refresh.g. 
The best approach would be to wait for the needed element in a dynamic period and then continue the execution as soon as the element is found.g. causing Selenium to raise a timeout exception. if there was an earlier select option labeled “Real Numbers. 5. load or other uncontrolled factors of the moment. It simply stores a boolean value–“true” or “false”–depending on whether the UI element is found. Run the script using Selenium-RC and a client library such as Java or PHP to utilize the programming language’s flow control features.). Release 1. the text value to be stored and a selenium variable.0 Selenese. does not support condition statements (if-else. Command store Target paul@mysite. Selenium variables can be used to store values passed to your test program from the command-line. However. However.7 Store Commands and Selenium Variables You can use Selenium variables to store constants at the beginning of a script. you’ll want to use the stored value of your variable. Command verifyText Target //div/p Value ${userName} A common use of variables is for storing input for an input field. Install the goto_sel_ide.) or iteration (for.js extension. programming logic is often needed. enclose the variable in curly brackets ({}) and precede it with a dollar sign like this. or from a file. 3. A Selenium variable may also be used within a locator expression.1 storeElementPresent This corresponds to verifyElementPresent. Here are a couple more commonly used store commands. for a functional test of dynamic content. some organizations prefer to run their scripts from SeleniumIDE whenever possible (for instance. It takes two parameters. consider a JavaScript snippet or the goto_sel_ide. When flow control is needed. Run a small JavaScript snippet from within the script using the storeEval command. An equivalent store command exists for each verify and assert command. while. 5. by itself. Also. or when programming skills are lacking). 2.org Value userName Later in your script. 
To access the value of a variable.7. etc. etc. when they have many junior-level people running tests for them.7. The plain store command is the most basic of the many store commands and can be used to simply store a constant value in a selenium variable.js extension. 5.Selenium Documentation. when combined with a data-driven test design (discussed in a later section). Store Commands and Selenium Variables 43 . 5. If this is your case. Use the standard variable naming conventions of only alphanumeric characters when choosing a name for your variable. there are three options: 1. Command type Target id=login Value ${userName} Selenium variables can be used in either the first or second parameter and are interpreted by Selenium prior to any other operations performed by the command. from another program. Many useful tests can be conducted without flow control. possibly involving multiple pages. Most testers will export the test script into a programming language file that uses the Selenium-RC API (see the Selenium-IDE chapter). even when the parameter is not specified to be of type script. is stored in the variable.0 5. in this case the JavaScript String object’s toUpperCase method and toLowerCase method. Selenium Commands . verifyEval. and waitForEval. you must refer to it as storedVars[’yourVariableName’]. normally the Target field (because a script parameter is normally the first or only parameter). StoreText can be used to extract text from the page being tested. In most cases. An associative array has string indexes rather than sequential numeric indexes.7. storeEval. These parameters require no special syntax. It uses a locater to identify specific page text. Command store storeEval storeEval Target Edith Wharton storedVars[’name’].8 JavaScript and Selenese Parameters JavaScript can be used with two types of Selenese parameters: script and non-script (usually expressions). 
The text. in this case. is stored in the variable. 5.8 JavaScript and Selenese Parameters JavaScript can be used with two types of Selenese parameters: script and non-script (usually expressions). In most cases. you’ll want to access and/or manipulate a test case variable inside the JavaScript snippet used as a Selenese parameter. StoreText can be used to extract text from the page being tested. It uses a locator to identify specific page text. 5.8.1 JavaScript Usage with Script Parameters Several Selenese commands specify a script parameter including assertEval. storeEval. verifyEval. and waitForEval. These parameters require no special syntax. 5.7.2 storeText StoreText corresponds to verifyText. 5.7.3 storeEval This command takes a script as its first parameter. A Selenium-IDE user would simply place a snippet of JavaScript code into the appropriate field. normally the Target field (because a script parameter is normally the first or only parameter). StoreEval allows the test to store the result of running the script in a variable. in this case the JavaScript String object’s toUpperCase method and toLowerCase method. All variables created in your test case are stored in a JavaScript associative array. An associative array has string indexes rather than sequential numeric indexes. The associative array containing your test case’s variables is named storedVars. Whenever you wish to access or manipulate a variable within a JavaScript snippet. you must refer to it as storedVars[’yourVariableName’]. However. special syntax is required–the JavaScript snippet must be enclosed inside curly braces and preceded by the label javascript. as in javascript {*yourCodeHere*}. 5.8.2 JavaScript Usage with Non-Script Parameters JavaScript can also be used to help generate values for parameters. Embedding JavaScript within Selenese is covered in the next section. Below is an example in which the type command’s second parameter value is generated via JavaScript code using this special syntax: 44 Chapter 5. Command store storeEval storeEval Target Edith Wharton storedVars[’name’].toUpperCase() storedVars[’name’].toLowerCase() Value name uc lc 5. if found. Selenium Commands . 
These notes also can be used to provide context within your test result reports. and Multiple Windows Suppose that you are testing a page that looks like this. output( "Alert is gone." ).nodeValue=resultText. } function show_prompt(){ var response = prompt( "What’s the best web QA tool?" . echo statements can be used to print the contents of Selenium variables. } function open_window(windowName){ window.html" .Selenium Documentation.childNodes[0].0 Command store type Target league of nations q Value searchString javascript{storedVars[’searchString’]. } function show_confirm(){ var confirmation=confirm( "Chose an option." ).9 echo . "Selenium" ). echo . Command echo echo Target Testing page footer now. which can be useful for finding where a defect exists on a page in the event your test finds a problem.getElementById( ’output’ ).The Selenese Print Command Selenese has a simple command that allows you to print text to your test’s output. Release 1. Username is ${userName} Value 5. Finally. } </script> </head> <body> 5.9. Release 1. JavaScript pop-ups will not appear. But before we begin covering alerts/confirms/prompts in individual detail.0 34 35 36 37 38 39 40 41 42 43 44 45 <input type= "button" id= <input type= "button" id= <input type= "button" id= <a href= "newWindow. To handle a pop-up. This is because the function calls are actually being overridden at runtime by Selenium’s own JavaScript. just because you cannot see the pop-up doesn’t mean you don’t have do deal with it. as well as moving focus to newly opened popup windows. If you just want to assert that an alert is present but either don’t know or don’t care what text it contains. However. 46 Chapter 5.” But this is Selenium-IDE handling and closing the alert for you.Selenium Documentation. you must call it’s assertFoo(pattern) function. it is helpful to understand the commonality between them. Selenium Commands . open the HTML sample above in a browser and click on the “Show alert” button. 
you can use assertAlertPresent. Fortunately. This will return true or false. with false halting the test. Your test will look something like this: Command open click assertAlert verifyTextPresent Target / btnAlert I’m blocking Alert is gone. Value You may be thinking “That’s odd. I never tried to assert that alert.” But this is Selenium-IDE handling and closing the alert for you. Now run through the same steps with Selenium IDE recording. and verify the text is added after you close the alert. If you fail to assert the presence of a pop-up your next command will be blocked and you will get an error similar to the following [error] Error: There was an unexpected Confirmation! [Chose an option..] 5. open the HTML sample above in a browser and click on the “Show alert” button. To begin. However. just because you cannot see the pop-up doesn’t mean you don’t have to deal with it. This is because the function calls are actually being overridden at runtime by Selenium’s own JavaScript. as well as moving focus to newly opened popup windows. If you remove that step and replay the test you will get the following error [error] Error: There was an unexpected Alert! [I’m blocking!]. Alerts. You must include an assertion of the alert to acknowledge its presence. ” is displayed on the page. You’ll notice that after you close the alert the text “Alert is gone. 5.10.1 Alerts Let’s start with asserts because they are the simplest pop-up to handle. To handle a pop-up. you must call its assertFoo(pattern) function. If you just want to assert that an alert is present but either don’t know or don’t care what text it contains. Selenium can cover JavaScript pop-ups. But before we begin covering alerts/confirms/prompts in individual detail. it is helpful to understand the commonality between them. 46 Chapter 5. Selenium Commands . 
5.10.2 Confirmations Confirmations behave in much the same way as alerts. with assertConfirmation and assertConfirmationPresent offering the same characteristics as their alert counterparts. This is because the order of events Selenium-IDE records causes the click and chooseCancelOnNextConfirmation to be put in the wrong order (it makes sense if you think about it. Try recording clicking on the “Show confirm box” button in the sample page. but click on the “Cancel” button in the popup. because Selenium complains that there is an unhandled confirmation. by default Selenium will select OK when a confirmation pops up. Your test may look something like this: Command open click chooseCancelOnNextConfirmation assertConfirmation verifyTextPresent Target / btnConfirm Chose an option. and Multiple Windows 47 . You may notice that you cannot replay this test. Selenium can’t know that you’re cancelling before you open a confirmation) Simply switch these two commands and your test will run fine. then assert the output text. Rejected Value The chooseCancelOnNextConfirmation function tells Selenium that all following confirmations should return false. It can be reset by calling chooseOkOnNextConfirmation. Alerts. 5.10. Selenium Commands . Release 1.0 48 Chapter 5.
This runs the Selenese action or verification you specified in your test script. interprets and runs the Selenese commands passed from the test program. Then the server passes the Selenium command to the browser using Selenium-Core JavaScript commands. intercepting and verifying HTTP messages passed between the browser and the AUT. The diagram shows the client libraries communicate with the Server passing each Selenium command for execution.1 RC Components Selenium-RC components are: • The Selenium Server which launches and kills browsers.. Here is a simplified architecture diagram.. 50 Chapter 6.0 6..2. interprets them.. Release 1.1 Installing Selenium Server The Selenium-RC server is simply a Java jar file (selenium-server.Selenium Documentation.jar). actually a set of JavaScript functions which interprets and executes Selenese commands using the browser’s built-in JavaScript interpreter. you simply need to: • Install the Selenium-RC Server.3 Client Libraries The client libraries provide the programming support that allows you to run Selenium commands from a program of your own design. which doesn’t require any special installation. The client library also receives the result of that command and passes it back to your program.3. The Server receives the Selenese commands from your test program using simple HTTP GET/POST requests. which run Selenium commands from your own program. you simply write a program that runs a set of Selenium commands using a client library API. Within each interface. See the Selenium-IDE chapter for specifics on exporting RC code from Selenium-IDE. 6.0 6. This means you can use any programming language that can send HTTP requests to automate Selenium tests on the browser. These folders have all the components you need for using Selenium-RC with the programming language of your choice. Selenium-Core is a JavaScript program.2 Selenium Server Selenium Server receives Selenium commands from your test program. 
you can generate the Selenium-RC code. And. 6. or possibly take corrective action if it was an unexpected error.e. there is a programming function that supports each Selenese command. There is a different client library for each supported language.3 Installation After downloading the Selenium-RC zip file from the downloads page. So to create a test program. The client library takes a Selenese command and passes it to the Selenium Server for processing a specific action or test against the application under test (AUT). optionally. i. Your program can receive the result and store it into a program variable and report it as a success or failure. The Selenium-IDE can translate (using its Export menu item) its Selenium commands into a client-driver’s API function calls. A Selenium client library provides a programming interface (API). 6. and reports back to your program the results of running those tests. Installation 51 . • Set up a programming project using a language specific client driver. 6. The RC server bundles Selenium Core and automatically injects it into the browser. Just downloading the zip file and extracting the server in the desired directory is sufficient.3. Once you’ve chosen a language to work with.2. a set of functions.2. you’ll notice it has several subfolders. This occurs when your test program opens the browser (using a client library API function). if you already have a Selenese test script created in the SeleniumIDE. 6. • Run Selenium server from the console.3. • From Selenium-IDE.Selenium Documentation. • Add to your project classpath the file selenium-java-client-driver. Release 1.0 6. NetBeans.jar. or TestNg to run your test. etc. Then make a shortcut to that executable on your desktop and simply double-click the icon to start the server. • Add to your test’s path the file selenium. The API is presented later in this chapter. 
java -jar selenium-server.3 Using the Java Client Driver • Download Selenium-RC from the SeleniumHQ downloads page.5 or later). see the Appendix sections Configuring Selenium-RC With Eclipse and Configuring Selenium-RC With Intellij.bat on Windows and . export a script to a Java file and include it in your Java project. You can either use JUnit. Selenium-RC . IntelliJ. 6.py • Either write your Selenium test in Python or export a script from Selenium-IDE to a python file. • Open your desired Java IDE (Eclipse.2 Running Selenium Server Before starting any tests you must start the server. Netweaver. Go to the directory where Selenium-RC’s server is located and run the following from a command-line console.jar.) • Create a new project.jar files to your project as references. or you can write your own simple main() program.4 Using the Python Client Driver • Download Selenium-RC from the SeleniumHQ downloads page • Extract the file selenium.sh on Linux) containing the command above. • Execute your test from the Java IDE or from the command-line. you’re ready to start using Selenium-RC.3. You can check that you have Java correctly installed by running the following on a console: java -version If you get a version number (which needs to be 1. For details on Java test project configuration. For the server to run you’ll need Java installed and the PATH environment variable correctly configured to run it from the console.py 52 Chapter 6. or write your Selenium test in Java using the selenium-java-client API.jar This can be simplified by creating a batch or shell executable file (. These concepts are explained later in this section. • Add the selenium-java-client-driver. • Extract the file selenium-java-client-driver.3. 0 • Run Selenium server from the console • Execute your test from a console or your Python IDE For details on Python client driver configuration. install it from RubyForge • Run gem install selenium-client • At the top of your test script. framework. 
see the Selenium-Client documentation 6. see the appendix Python Client Driver Configuration.5 Using the . These concepts are explained later in this chapter. • Write your own simple main() program or you can include NUnit in your project for running your test. Release 1. VB. nunit.Core. • Execute your test in the same way you would run any other Ruby script.Net).3. ThoughtWorks. For details on Ruby client driver configuration.Selenium.Selenium Documentation.3. see the appendix . Installation 53 . ThoughtWorks. • Run Selenium-RC server from the console. or export a script from Selenium-IDE to a C# file and copy this code into the class file you just created.IntegrationTests. 6.dll. add require "selenium/client" • Write your test script using any Ruby test harness (eg Test::Unit.) • Open your desired .dll and ThoughtWorks.NET client driver configuration with Visual Studio. SharpDevelop.dll. MonoDevelop) • Create a class library (.dll. nunit.Net language (C#.NET client driver configuration.Net IDE (Visual Studio.dll • Write your Selenium test in a . you can also write a simple main() function to run your tests.Selenium.dll.6 Using the Ruby Client Driver • If you do not already have RubyGems. Mini::Test or RSpec).Selenium. If you’re not familiar yet with NUnit. from the NUnit GUI or from the command line For specific details on . 6. however NUnit is very useful as a test engine.UnitTests.3. • Run Selenium server from console • Run your test either from the IDE.dll) • Add references to the following DLLs: nmock.core.NET Client Driver • Download Selenium-RC from the SeleniumHQ downloads page • Extract the folder • Download and install NUnit ( Note: You can use NUnit as your test engine. 4.com 6. System.oriented programming language. System. In C#: using using using using using using System.Text.Threading.Stop(). verificationErrors = new StringBuilder(). If you have at least basic knowledge of an object. NUnit. 
you will understand how Selenium runs Selenese commands by reading one of these examples. [SetUp] public void SetupTest() { selenium = new DefaultSelenium( "localhost" . ". 4444.RegularExpressions. private StringBuilder verificationErrors. Release 1. namespace SeleniumTests { [TestFixture] public class NewTest { private ISelenium selenium. } [TearDown] public void TeardownTest() { try { selenium.2 Selenese as Programming Code Here is the test script exported (via Selenium-IDE) to each of the supported programming languages. To see an example in a specific language.4. Selenium. clickAndWait btnG assertTextPresent Results * for selenium rc Note: This example would work with the Google search page. 6. Selenium-RC . Imagine recording the following test with Seleniumopen / type q selenium rc IDE.0 6.4 From Selenese to a Program The primary task for using Selenium-RC is to convert your Selenese into a programming language. we provide several different language-specific examples.Selenium Documentation. System.Start().Framework. select one of these buttons.1 Sample Test Script Let’s start with an example Selenese test script.Text. } catch (Exception) 54 Chapter 6. "*firefox" . In this section. selenium. assertTrue(selenium. 4444.1 Java For Java. You can remove the module if it’s not used in your // script. This makes it even easier. Teaching JUnit or TestNG is beyond the scope of this document however materials may be found online and there are publications available.open( "/" ).google. package com. you will need to change the browser-open parameters in the statement: selenium = new DefaultSelenium("localhost".regex. selenium. If you are already a “java-shop” chances are your developers will already have some experience with one of these test frameworks. You will probably want to rename the test class from “NewTest” to something of your own choosing. "selenium rc" ).0 • Python • Ruby • Perl.com/" .com/").5. // This is the driver’s import.thoughtworks. 
// Selenium-IDE add the Pattern module because it’s sometimes used for // regex validations. selenium. ". Some development environments like Eclipse have direct support for these via plug-ins.click( "btnG" ).google.util. import java.*.selenium. You’ll use this for instantiating a // browser and making it do what you need. "*iehta". // We specify the package of our tests import com. Release 1.tests. selenium. "*firefox" ).type( "q" . people use either JUnit or TestNG as the test engine.Selenium Documentation. This example has comments added manually for additional clarity. // We instantiate and start the browser } public void testNew() throws Exception { selenium.waitForPageToLoad( "30000" ). Also.example. public class NewTest extends SeleneseTestCase { // We create our Selenium test case public void setUp() throws Exception { setUp( ". PHP 6. // These are the real test steps } } 58 Chapter 6.Pattern.isTextPresent( "Results * for selenium rc" )). The Selenium-IDE generated code will look like this. Selenium-RC . You can see this in the generated code below. ". namespace SeleniumTests { [TestFixture] public class NewTest { private ISelenium selenium. 4444.google.google.0 6.RegularExpressions. System. It includes the using statement for NUnit along with corresponding NUnit attributes identifying the role for each member function of the test class. using using using using using using System.NET Client Driver works with Microsoft.com/" ). Release 1. "").Start(). You will probably have to rename the test class from “NewTest” to something of your own choosing.Text.Text.Threading. Programming Your Test 59 . Selenium-IDE assumes you will use NUnit as your testing framework. Also. private StringBuilder verificationErrors. verificationErrors = new StringBuilder(). } [TearDown] public void TeardownTest() { try { selenium. System. "*iehta". It can be used with any . System.Framework. 
you will need to change the browser-open parameters in the statement: selenium = new DefaultSelenium("localhost". Selenium. } 6. [SetUp] public void SetupTest() { selenium = new DefaultSelenium( "localhost" .5. NUnit.Selenium Documentation.5. The generated code will look similar to this.NET testing framework like NUnit or the Visual Studio 2005 Team System. "*iehta" .Stop().NET. 4444.2 C# The . selenium. // Read the keyed search term and assert it. } } } You can allow NUnit to manage the execution of your tests.ToString()). and TeardownTest() in turn. // Wait for page to load. Or alternatively.Click( "btnG" ). } [Test] public void TheNewTest() { // Open Google search engine.GetTitle()). selenium. verificationErrors.5. 6. selenium. Assert.org" is available in search results.Type( "q" . You’ll use this class for instantiating a # browser and making it do what you need."Selenium OpenQA .google. TheNewTest(). selenium. // Assert that "www. selenium.AreEqual( "Selenium OpenQA . // Click on Search button.WaitForPageToLoad( "5000" ).org" )). you can write a simple main() program that instantiates the test object and runs each of the three methods. Assert.openqa. // Assert Title of page. time. SetupTest(). Selenium-RC . Assert. selenium.Selenium Documentation.3 Python Pyunit is the test framework to use for Python. import unittest.openqa.IsTrue(selenium.GetValue( "q" )). // Assert that page title is . To learn Pyunit refer to its official documentation <( "" .0 catch (Exception) { // Ignore errors if unable to close the browser } Assert.GetTitle()). "Selenium OpenQA" ).Open( ">_.org/library/unittest.python.AreEqual( "Selenium OpenQA" .Google Search" . Release 1. // Provide search term as "Selenium OpenQA" selenium. 60 Chapter 6.Google Search" Assert.com/" ). The basic test structure is: from selenium import selenium # This is the driver’s import.AreEqual( "Google" . selenium. re # This are the basic imports added by Selenium-IDE by default.IsTextPresent( "www. 
wait_for_page_to_load( " 30000 " ) self.” and change the test method’s name to something other than “test_untitled.start() # We instantiate and start the browser def test_new(self): # This is the test code. as described 6. sel. On line 1. On line 11.0 # You can remove the modules if they are not used in your script.click( " btnG " ) sel. not the old Selenium gem. In fact.google. This is a problem because the official Ruby driver for Selenium is the Selenium-Client gem.selenium" each time we want to call the browser).4 Ruby Selenium-IDE generates reasonable Ruby. Release 1. Here you should put the actions you need # the browser to do during your test.selenium. " *firefox " .TestCase): # We create our unittest test case def setUp(self): self. sel = self. it is advisable to update any Ruby scripts generated by the IDE as follows: 1.new to You probably also want to change the class name to something more informative than “Untitled. 4444.5.is_text_present( " Results * for selenium rc " )) # These are the real test steps def tearDown(self): self. class NewTest(unittest.failUnless(sel. change require "selenium" to require "selenium/client" 2. Programming Your Test 61 . Therefore.verificationErrors) # And make the test fail if we found that any verification errors # were found 6.stop() # we close the browser (I’d recommend you to comment this line while # you are creating and debugging your tests) self. self.open( " / " ) sel.selenium.5. change Selenium::Client::Driver.new Selenium::SeleniumDriver.type( " q " .selenium = selenium( " localhost " .verificationErrors = [] # This is an empty array where we will store any verification errors # we find in our tests self.assertEqual([].selenium # We assign the browser to the variable "sel" (just to save us from # typing "self. " selenium rc " ) sel.” Here is a simple example created by modifying the Ruby code generated by Selenium IDE. but requires the old Selenium gem. 
the Selenium gem is no longer even under active development.com/ " ) self. " Documentation. require " test/unit " class Untitled < Test::Unit::TestCase # The setup method is called before each test. Release 1. :timeout_in_second => 60 # Start the browser session @selenium. :port => 4444. Ruby 1. if any. :url => "’s default test framework. above.Selenium Documentation. def teardown # Stop the browser session. @selenium = Selenium::Client::Driver. see the examples in the Selenium-Client # documentation.new \ :host => " localhost " . @selenium.open " / " # Type ’selenium rc’ into the field named ’q’ @selenium. @verification_errors = [] # Create a new instance of the Selenium-Client driver. # load the Selenium-Client gem require " selenium/client " # Load Test::Unit. def setup # This array is used to capture errors and display them at the # end of the test run.type " q " .com/ " . @verification_errors end # This is the main body of your test. # If you prefer RSpec. assert_equal [].start # Print a message in the browser-side log and status bar # (optional).stop # Print the array of error messages. Selenium-RC .click " btnG " 62 Chapter 6.0 above.google. " selenium rc " # Click the button named "btnG" @selenium.set_context( " test_untitled " ) end # The teardown method is called after each test. @selenium. def test_untitled # Open the root of the site we specified when we created the # new driver instance. :browser => " *chrome " . @selenium. Selenium Documentation. "*firefox" . @verification_errors << $! end end end 6.6. port => 4444. Here.6 Learning the API The Selenium-RC API uses naming conventions that. If you are using Selenium-RC with either of these two languages please contact the Documentation Team (see the chapter on contributing).com/" . 6.0 # Wait for the search results page to load. "*firefox" ). PHP The members of the documentation team have not used Selenium-RC with Perl or PHP. Release 1. ". 
# Notice that the star (*) is a wildcard that matches any # number of characters. @selenium.5 Perl.6.wait_for_page_to_load begin # Test whether the search results contain the expected text. assert @selenium.com/" selenium. 6. above.5. assuming you understand Selenese. Learning the API 63 . In Perl: my $sel = Test::WWW::Selenium->new( host => "localhost" .1 Starting the Browser In C#: selenium = new DefaultSelenium( "localhost" . In Java: setUp( ". however. push it onto the array of errors. we explain the most critical and possibly less obvious aspects. 4444. much of the interface will be self-explanatory. We would love to include some examples from you and your experiences. because that # was specified when we created the new driver instance. to support Perl and PHP users.is_text_present( " Results * for selenium rc " ) rescue Test::Unit::AssertionFailedError # If the assertion fails. 6. # Note that we don’t need to set a timeout here.Start(). In Python: self.google.start Each of these examples opens the browser and represents that browser by assigning a “browser instance” to a program variable.type( " field-id " . " *firefox " . Release 1.6. In some clients this is an optional parameter. " *firefox " .com/" ). " @selenium.e. to call the type method of the selenium object: selenium. Usually.new( " localhost " .com/ " ) self. $this->setBrowserUrl(". browser_url => ". url The base url of the application under test.Selenium Documentation. This also is optional in some client drivers. For example. In PHP: $this->setBrowser("*firefox"). Selenium-RC .google.0 browser => "*firefox" .start() In Ruby: @selenium = Selenium::ClientDriver. i. This is a required parameter. The parameters required when creating the browser instance are: host Specifies the IP address of the computer where the server is located. This program variable is then used to call methods from the browser. so in this case localhost is passed.selenium.google. 
" string to type " ) In the background the browser will actually perform a type operation. 64 Chapter 6. 4444.com/"). browser The browser in which you want to run the tests. This is required by all the client libs and is integral information for starting up the browser-proxy-AUT communication. port Specifies the TCP/IP socket where the server is listening waiting for the client to establish a connection. Note that some of the client libraries require the browser to be started explicitly by calling its start() method. this is the same machine as where the client is running. by using the locator and the string you specified during the method call.2 Running Commands Once you have the browser initialized and assigned to a variable (generally named “selenium”) you can make it run Selenese commands by calling the respective methods from the browser variable. essentially identical to a user typing input into the browser.selenium = selenium( " localhost " . 6. like open or type or the verify commands. These methods execute the Selenium commands. ". 4444. Rather.4 Test Reporting Examples To illustrate. Release 1. it allows you to build your reporting customized to your needs using features of your chosen programming language.NET also has its own.Selenium Documentation. These. See TestNG Report for more. along with their primary function of providing a flexible test engine for executing your tests. learning curve you will naturally develop what works best for your own situation.7.7. Reporting Results 65 . include library code for reporting results. 6. we’ll direct you to some specific tools in some of the other languages supported by Selenium. 6. That may gradually lead to you developing your own reporting.2 Test Report Libraries Also available are third-party libraries specifically created for reporting test results in your chosen programming language.0 6.7 Reporting Results Selenium-RC does not have its own mechanism for reporting results.3 What’s The Best Approach? 
Most people new to the testing frameworks will being with the framework’s built-in reporting features. but what if you simply want something quick that’s already done for you? Often an existing library or test framework can meet your needs faster than developing your own test reporting code. JUnit and TestNG. We will simply introduce the framework features that relate to Selenium along with some techniques you can apply. There are good books available on these test frameworks however along with information on the internet. 6. For example. .1 Test Framework Reporting Tools Test frameworks are available for many programming languages. possibly in parallel to using a library or test framework.7. • If Selenium Test cases are developed using TestNG then no external task is required to generate test reports. Test Reports in Java • If Selenium Test cases are developed using JUnit then JUnit Report can be used to generate test reports. 6. Regardless. but short. That’s great. The TestNG framework generates an HTML report which list details of tests. As you begin to use Selenium no doubt you will start putting in your own “print statements” for reporting progress. NUnit. From there most will examine any available libraries as that’s less time consuming than developing your own. The ones listed here are commonly used and have been used extensively (and therefore recommended) by the authors of this guide. that’s beyond the scope of this user guide.7. Refer to JUnit Report for specifics. We won’t teach the frameworks themselves here.7. 6. Java has two commonly used test frameworks. These often support a variety of formats such as HTML or PDF. after the initial. See TestNG-xslt for more. You will find as you transition from the simple tests of the existence of page elements to tests of dynamic functionality involving multiple web-pages and varying data that you will require programming logic for verifying expected results. 
Note: If you are interested in a language independent log of what’s going on.8 Adding Some Spice to Your Tests Now we’ll get to the whole reason for using Selenium-RC. the Selenium-IDE does not support iteration and standard condition statements. colour-coded view of the test results. It is intended as a replacement for the default TestNG HTML report.Selenium Documentation. Logging Selenium extends the Java client driver to add this Selenese logging ability. In addition you can report progress information using I/O. Please refer to Logging Selenium. take a look at Selenium Server Logging 6. ReportNG provides a simple. • Also.0 • ReportNG is a HTML reporting plug-in for the TestNG framework. Test Reports for Ruby • If RSpec framework is used for writing Selenium Test Cases in Ruby then its HTML report can be used to generate a test report. Program flow is controlled using condition statements and iteration. A TestNG-xslt Report looks like this. 66 Chapter 6. Test Reports for Python • When using Python Client Driver then HTMLTestRunner can be used to generate a Test Report. See ReportNG for more. Refer to RSpec Report for more. Release 1. In this section we’ll show some examples of how programming language constructs can be combined with Selenium to solve common testing problems. Basically. See HTMLTestRunner. adding programming logic to your tests. for a very nice summary report try using TestNG-xslt. It’s the same as for any program. Selenium-RC . Logging the Selenese Commands • Logging Selenium can be used to generate a report of all the Selenese commands in your test along with the success or failure of each. You can do some conditions by embedding javascript in Selenese parameters. we can iterate over the search results for a more flexible and maintainable solution. In addition.0 however iteration is impossible. sel. sel. "rc" ." .open( "/" ).8.click( "btnG" ). perhaps for verifying your test results you need to process a “result set” returned from a database. 
For example. If you have some basic knowledge of an object-oriented programming language you shouldn’t have difficulty understanding this section. although the code is simple and can be easily adapted to the other supported languages. Using the same Google search example we used earlier. you may want to to execute a search multiple times. // Execute loop for each String in array ’arr’. foreach (String s in arr) { sel. Release 1. But multiple copies of the same code is not good program practice because it’s more work to maintain. For example. "grid" }. and most conditions will be much easier in a programming language.type( "q" . sel.8.waitForPageToLoad( "30000" ). we have written this section to illustrate the use of common programming techniques to give you greater ‘verification power’ in your automated testing.isTextPresent( "Results * for selenium " + s)). Adding Some Spice to Your Tests 67 .8. Or. The examples in this section are written in C# and Java.. In C#: // Collection of String values.2 Condition Statements To illustrate using conditions in tests we’ll start with an example. By using a programming language. let’s check the Selenium search results. "selenium " +s). assertTrue( "Expected text: " +s+ " is missing on page. } 6. A common problem encountered while running Selenium tests occurs when an expected element is not available on page. For these reasons and others.1 Iteration Iteration is one of the most common things people need to do in their tests. sel. you may need exception handling for error recovery.Selenium Documentation. when running the following line: 6. String[] arr = { "ide" . // If element is available on page then perform type operation. A better approach is to first validate if the element is really present and then take alternatives when it it is not.getElementsByTagName(’input’). // Counter for check box ids. script += "inputId. // end of for." + // increment the counter. 
But often that is not desirable as your test script has many other subsequent tests to perform.id !=null " + "&& inputFields[i]." ." .// Convert array in to string." ." .type( "q" . // Split the s return checkboxIds. script += "for(var i=0.isElementPresent( "q" )) { selenium. The getEval method of selenium API can be used to execute JavaScript from selenium RC. if(selenium.8. If element ‘q’ is not on the page then an exception is thrown: com.3 Executing JavaScript from Your Test JavaScript comes very handy in exercising an application which is not directly supported by selenium. // Create array in java scrip script += "inputFields = window.Selenium Documentation." ) } The advantage of this approach is to continue with test execution even if some UI elements are not available on page.document.split( ". script += "var inputFields = new Array(). For some tests that’s what you want.thoughtworks.id . String[] checkboxIds = selenium. "selenium " +s).SeleniumException: ERROR: Element q not found This can cause your test to abort. } To count number of images on a page: 68 Chapter 6." ). "}" + // end of if.getAttribute(’type’) == ’checkbox’) {" . In this case one could evaluate JavaScript from selenium RC to get ids of all check boxes and then exercise them.printf( "Element: " +q+ " is not available on page. 6. Let’s look at this using Java. } else { System. Release 1.// Create array in java scri script += "var cnt = 0.id !=’undefined’ " + "&& inputFields[i]. // Loop through the script += "if(inputFields[i]. // If input fie script += "inputId[cnt]=inputFields[i]. Consider an application having check boxes with no static identifiers. Selenium-RC .0 selenium.type( "q" . i++) {" . "}" ." ." + // Save check box id to inp "cnt++. public static String[] getAllCheckboxIds () { String script = "var inputId = new Array(). "Selenium rc" ).toString().out.length.selenium. i<inputFields.getEval(script). http.9 Server Options When the server is launched.jar -Dhttp.9. 
Selenium by default ran the application under test in a sub frame as shown here. $ java -jar selenium-server.length. The provided descriptions will not always be enough. Remember to use window object in case of DOM expressions as by default selenium window is referred to.proxyPassword using the following command. prior to version 1. Server Options 69 .proxyPort=8080 -Dhttp.jar To see the list of options.getEval( "window. Release 1. $ java -jar selenium-server. However. so we’ve provided explanations for some of the more important options." ).Selenium Documentation. Recall.0 selenium.proxyPort.2 Multi-Window Mode If you are using Selenium 1.9. $ java -jar selenium-server. not the test window. 6. command line options can be used to change the default server behaviour.0 you can probably skip this section.proxyHost. http.proxyHost=proxy.images.9. 6. 6. 6. run the server with the -h option. since multiwindow mode is the default behavior.0. the server is started by running the following.com -Dhttp.document.jar -h You’ll see a list of all the options you can use with the server and a brief description of each.proxyUser and http.1 Proxy Configuration If your AUT is behind an HTTP proxy which requires authentication then you should configure http. 0 Some applications didn’t run correctly in a sub frame. 70 Chapter 6. Selenium-RC . The multi-window mode option allowed the AUT to run in a separate window rather than in the default frame where it could then have the top frame it required. Release 1. and needed to be loaded into the top frame of the window.Selenium Documentation. Open the Windows Start menu. you can probably skip this section.0.exe -profilemanager 6. Server Options 71 . First. so if you are using Selenium). if you want to run your test within a single frame (i.0.0 For older versions of Selenium you must specify multiwindow mode explicitly with the following option: -multiwindow As of Selenium-RC 1. you will need to explicitly specify the profile.9. 
using the standard for earlier Selenium versions) you can state this to the Selenium Server using the option -singlewindow 6. select “Run”.9.Selenium Documentation.0 and later runs in a separate profile automatically.3 Specifying the Firefox Profile Firefox will not run two instances simultaneously unless you specify a separate profile for each instance. However. then type and enter one of the following: firefox.e. Selenium-RC 1. follow this procedure. Release 1. to create a separate Firefox profile. SeleniumDriverResourceHandler Browser 465828/:top frame1 posted START NEW The message format is 72 Chapter 6.9.9.5 Selenium Server Logging Server-Side Logs When launching selenium server the -log option can be used to record valuable debugging information reported by the Selenium Server to a text file.jar -htmlSuite "*firefox" ". the server will start the tests and wait for a specified number of seconds for the test to complete. . Selenium-RC . For example: 20:44:25 DEBUG [12] org.selenium.exe -P Create the new profile using the dialog.jar -log selenium. This command line is very long so be careful when you type it. 6. regardless of whether they are profile files or not. Also be aware the -htmlSuite option is incompatible with -interactive You cannot run both at the same time. Then when you run Selenium Server. and the ID number of the thread that logged the message. More information about Firefox profiles can be found in Mozilla’s Knowledge Base 6. java -jar selenium-server. run all the tests and save a nice HTML report with the results. Note this requires you to pass in an HTML Selenese suite. not a single test. if the test doesn’t complete within that amount of time.log This log file is more verbose than the standard console logs (it includes DEBUG level logging messages). Note: When using this option.google.server. Release 1. the command will exit with a non-zero exit code and no results file will be generated.0 firefox.openqa. 
The log file also includes the logger name.com" "c:\absolute This will automatically launch your HTML suite. these can be more useful to the end-user than the regular Selenium Server logs. Selenium-Core (and its JavaScript commands that make all the magic happen) must be placed in the same origin as the Application Under Test (same URL). This security restriction is applied by every browser in the market and its objective is to ensure that a site’s content will never be accessible by a script from another site.0 TIMESTAMP(HH:mm:ss) LEVEL [THREAD] LOGGER . a script placed on any website you open would be able to read information on your bank account if you had the account page opened on other tab. to log browserSideLogs (as well as all other DEBUG level logging messages) to a file. It cannot perform functions on another website. Browser-Side Logs JavaScript on the browser side (Selenium Core) also logs important messages.MESSAGE This message may be multiline.11. 6. in many cases. Release 1. 6. it cannot run that loaded code against www. Specifying the Path to a Specific Browser 73 . This is called XSS (Cross-site Scripting).mysite2. When specifying the run mode. So for example.com.mysite.11 Selenium-RC Architecture Note: This topic tries to explain the technical implementation behind Selenium-RC.10. this is used to allow your tests to run against a browser not directly supported by Selenium-RC. It’s not fundamental for a Selenium user to know this. but could be useful for understanding some of the problems you might find in the future.Selenium Documentation. 6. Also. if the browser loads JavaScript code when it loads -browserSideLog -browserSideLog must be combined with the -log argument. If this were possible. pass the -browserSideLog argument to the Selenium Server. The Same Origin Policy dictates that any code loaded within the browser can only operate within that website’s domain.com–even if that’s another of your sites. To access browser-side logs. 
To understand in detail how Selenium-RC Server works and why it uses proxy injection and heightened privilege modes you must first understand the same origin policy. use the *custom specifier followed by the full path to the browser’s executable: *custom <path to browser> 6.10 Specifying the Path to a Specific Browser You can specify to Selenium-RC a path to a specific browser. To work within this policy.1 The Same Origin Policy The main restriction that Selenium faces is the Same Origin Policy. java -jar selenium-server. This is useful if you have different versions of the same browser and you wish to use a specific one. 2 The browser is launched with a configuration profile that has set localhost:4444 as the HTTP proxy. essentially. The proxy is a third person in the middle that passes the ball between the two parts.11. tells the browser that the browser is working on a single “spoofed” website that the Server provides. In Proxy Injection Mode.Selenium Documentation. 1 74 Chapter 6. Its use of the Selenium Server as a proxy avoids this problem. Release 1. restricted by the Same Origin Policy. however. Selenium-RC is not.. It. that sits between the browser and the Application Under Test. Selenium-RC .0 Historically. the Selenium Server acts as a client-configured 1 HTTP proxy 2 . It acts as a “web server” that delivers the AUT to the browser.2 Proxy Injection The first method Selenium used to avoid the The Same Origin Policy was Proxy Injection. Here is an architectural diagram. Selenium-Core was limited by this problem since it was implemented in JavaScript. 6. It then masks the AUT under a fictional URL (embedding Selenium-Core and the set of tests and delivering them as if they were coming from the same origin). Being a proxy gives Selenium Server the capability of “lying” about the AUT’s real URL. The client-driver passes a Selenese command to the server. The client/driver establishes a connection with the selenium-RC server. 5. Release 1. 
Selenium-RC server launches a browser (or reuses an old one) with a URL that injects SeleniumCore’s JavaScript into the browser-loaded web page. Selenium-RC Architecture 75 . 3. The Server interprets the command and then triggers the corresponding JavaScript execution to execute that command within the browser. Selenium-RC server communicates with the Web server asking for the page and once it receives it.Selenium Documentation. 6.0 As a test suite starts in your favorite language.). 6. the following happens: 1.11. 2. 4. The browser receives the open request and asks for the website’s content from the Selenium-RC server (set as the HTTP proxy for the browser to use). typically opening a page of the AUT. 2. 3. which allows websites to do things that are not commonly permitted (as doing XSS.0 8.11. As a test suite starts in your favorite language. Release 1.Selenium Documentation. The client/driver establishes a connection with the selenium-RC server.3 Heightened Privileges Browsers This workflow in this method is very similar to Proxy Injection but the main difference is that the browsers are launched in a special mode called Heightened Privileges. 76 Chapter 6. or filling file upload inputs and pretty useful stuff for Selenium). Here is the architectural diagram. The browser receives the web page and renders it in the frame/window reserved for it. the following happens: 1. Selenium-Core gets the first instruction from the client/driver (via another HTTP request made to the Selenium-RC Server). Selenium-RC . By using these browser modes. Selenium Core is able to directly open the AUT and read/interact with its content without having to pass the whole AUT through the Selenium-RC server. 6. Selenium-RC server launches a browser (or reuses an old one) with a URL that will load SeleniumCore in the web page. 
12 Handling HTTPS and Security Popups Many applications switch from using HTTP to HTTPS when they need to send encrypted information such as passwords or credit card information. It responds by alerting you with popup messages. When dealing with HTTPS in a Selenium-RC test. If you are using Selenium 1. Selenium-RC supports this.12. Handling HTTPS and Security Popups 77 . The browser receives the open request and asks the Web Server for the page. However. Release 1. use *chrome or *iehta. and should not be used unless required by legacy test programs. typically opening a page of the AUT. you will not need to install any special security certificates. Selenium-RC.12. To get around this.0 4. Their use will present limitations with security certificate handling and with the running of multiple windows if your application opens additional browser windows. *chrome or *iehta were the run modes that supported HTTPS and the handling of security popups. Once the browser receives the web page. Otherwise. Selenium-Core acts on that first instruction. renders it in the frame/window reserved for it. you may need to explicitly install this security certificate. 6.0 the run modes *firefox or *iexplore are recommended. These were considered ‘experimental modes although they became quite stable and many people used them.0 beta 2 and later use *firefox or *iexplore for the run mode. these older run modes. Using these run modes. Selenium-RC will handle it for you. When this occurs the browser displays security popups. 6.0 beta 1. there are additional run modes of *iexploreproxy and *firefoxproxy. and these popups cannot be closed using Selenium-RC. your browser will trust the application you are testing by installing a security certificate which you already own. for the run mode. if you are running Selenium-RC in proxy injection mode. The browser now thinks untrusted software is trying to look like your application. 
Another method used with earlier versions of Selenium was to install the Cybervillians security certificate provided with your Selenium installation. In version 1. it will assume that application is not ‘trusted’. In Selenium-RC 1.1 Security Certificates Explained Normally.Selenium Documentation. you must use a run mode that supports this and handles the security certificate for you. When Selenium loads your browser it injects code to intercept messages between the browser and the server. This is common with many of today’s web applications. You specify the run mode when your test program initializes Selenium. In earlier versions. You can check this in your browser’s options or Internet properties (if you don’t know your AUT’s security certificate ask your system administrator). and should not use. to your client machine in a place where the browser can access it. In earlier versions of Selenium-RC. when the browser accesses the AUT using HTTPS.0 you do not need. This tricks the browser into thinking it’s accessing a site different from your AUT and effectively suppresses the popups. 6. To ensure the HTTPS site is genuine. including Selenium-RC 1. These are provided for backwards compatibility only. Most users should no longer need to do this however. the browser will need a security certificate. temporarily. 5. (again when using a run mode that support this) will install its own security certificate. . without using an automatic configuration. then there is a problem with the connectivity between the Selenium Client Library and the Selenium Server. when a browser is not directly supported. Be aware that Mozilla browsers can vary in how they start and stop.g. We present them along with their solutions here.14 Troubleshooting Common Problems When getting started with Selenium-RC there’s a few potential problems that are commonly encountered. See the SeleniumHQ. you can force Selenium RC to launch the browser as-is. 
6..1 Unable to Connect to Server When your test program cannot connect to the Selenium Server. If so. be sure you started the Selenium Server.0 6.13. an exception will be thrown in your test program.Selenium Documentation.13 Supporting Additional Browsers and Browser Configurations The Selenium API supports running against multiple browsers in addition to Internet Explorer and Mozilla Firefox. you pass in the path to the browsers executable within the API call.Inner Exception Message: No connection could be made because the target machine actively refused it." (using . Normally this just means opening your browser preferences and specifying “localhost:4444” as an HTTP proxy..14. One may need to set the MOZ_NO_REMOTE environment variable to make Mozilla browsers behave a little more predictably.. in place of *firefox or *iexplore) when your test application starts the browser.e.. you may still run your Selenium tests against a browser of your choosing by using the “*custom” run-mode (i.NET and XP Service Pack 2) If you see a message like this.org website for supported browsers. With this. but instructions for this can differ radically from browser to browser. This can also be done from the Server in interactive mode. For example. It should display this message or a similar one: "Unable to connect to remote server. Consult your browser’s documentation for details. cmd=getNewBrowserSession&1=*custom c: \P rogram Files \M ozilla Firefox \M yBrowser. but if you launch the browser using the “*custom” run mode. 6.1 Running Tests with Different Browser Configurations Normally Selenium-RC automatically configures the browser.exe&2=htt Note that when launching the browser this way. firefox-bin) directly. it’s generally better to use the binary executable (e. you can launch Firefox with a custom configuration like this: cmd=getNewBrowserSession&1=*custom c: \P rogram Files \M ozilla Firefox \f irefox. Selenium-RC . In addition. 
Unix users should avoid launching the browser using a shell script. you must manually configure the browser to use the Selenium Server as a proxy.exe&2=h 6. 78 Chapter 6.. Release 1. When you use Selenium-IDE to export your script. telnet.3 Selenium Cannot Find the AUT If your test program starts the browser successfully. Check to be sure the path is correct. (500) Internal Server Error This could be caused by • Firefox (prior to Selenium 1. Release 1.4 Firefox Refused Shutdown While Preparing a Profile This most often occurs when your run your Selenium-RC test program against Firefox. This can easily happen. You must manually change the URL to the correct one for your application to be tested.Selenium Documentation. but you already have a Firefox browser session running and. Troubleshooting Common Problems 79 . most people begin by running thier test program (with a Selenium Client Library) and the Selenium Server on the same machine. however. If you have difficulty connecting. your system administrator can assist you. you can use common networking tools like ping. but if the Selenium Server cannot load the browser you will likley see this error.lang. See the section on Firefox profiles under Server Options. the most likely cause is your test program is not using the correct URL. 6.14. sorry.2 Unable to Load the Browser Ok. the connectivity should be fine assuming you have valid TCP/IP connectivity between the two machines. you didn’t specify a separate profile when you started the Selenium Server. If. 6. We recommend beginning this way since it reduces the influence of potential networking problems which you’re getting started.0 When starting with Selenium-RC. To do this use “localhost” as your connection parameter. ifconfig(Unix)/ipconfig (Windows). not a friendly error message. many people choose to run the tests this way. 6. etc to ensure you have a valid network connection. In truth.14.14. 
• You specified the path to the browser explicitly (using “*custom”–see above) but the path is incorrect. Check the parameters you passed to Selenium when your program opens the browser. but the browser doesn’t display the website you’re testing. it inserts a dummy URL. This can easily happen. many people choose to run the tests this way. 6. In truth.14.0 When starting with Selenium-RC. most people begin by running their test program (with a Selenium Client Library) and the Selenium Server on the same machine. We recommend beginning this way since it reduces the influence of potential networking problems which you’re getting started. If. however. you do want to run Selenium Server on a remote machine. the connectivity should be fine assuming you have valid TCP/IP connectivity between the two machines. If you have difficulty connecting. you can use common networking tools like ping. telnet. ifconfig(Unix)/ipconfig (Windows). etc to ensure you have a valid network connection. If unfamiliar with these. your system administrator can assist you. 6.14.2 Unable to Load the Browser Ok. not a friendly error message. sorry. but if the Selenium Server cannot load the browser you will likely see this error. 6.14.3 Selenium Cannot Find the AUT If your test program starts the browser successfully. 6.14.4 Firefox Refused Shutdown While Preparing a Profile This most often occurs when your run your Selenium-RC test program against Firefox. The error from the test program looks like this: Error: java. but you already have a Firefox browser session running and. Troubleshooting Common Problems 79 . To do this use “localhost” as your connection parameter. (500) Internal Server Error This could be caused by • Firefox (prior to Selenium 1. Release 1. the most likely cause is your test program is not using the correct URL. however. See the section on Firefox profiles under Server Options. You must manually change the URL to the correct one for your application to be tested.Selenium Documentation. you didn’t specify a separate profile when you started the Selenium Server. • The run mode you’re using doesn’t match any browser on your machine. Check to be sure the path is correct. When you use Selenium-IDE to export your script. Also check the user group to be sure there are no known issues with your browser and the “*custom” parameters. Assuming your operating system has typical networking and TCP/IP settings you should have little difficulty.RuntimeException: Firefox refused shutdown while preparing a profile Here’s the complete error message from the server: 6.
The Selenium Server requires Java 1.. But don’t forget to check which browser versions are supported by the version of Selenium you are using.FirefoxCustomProfileLaunc her. Selenium-RC .RuntimeException: Firefox refused shutdown while preparing a profile at org.5 Versioning Problems Make sure your version of Selenium supports the version of your browser.java:277) . *iexplore. *opera..browserlaunchers.6 Error message: “(Unsupported major.0 16:20:03. • *iexplore: If the browser is launched using *iexplore. it only appears to exist when the proxy is properly configured.browserlaunchers.7 404 error when running the getNewBrowserSession command If you’re getting a 404 error while attempting to open a page on “. To check double-check your java version. you may need to update the JRE.selenium...FirefoxCustomProfileLaunc her$FileLockRemainedException: Lock file still present! C:\DOCUME~1\jsvec\LOCALS ~1\Temp\customProfileDir203138\parent. Proxy Configuration highly depends on how the browser is launched with *firefox....com. 6.openqa. or *custom..Preparing Firefox profile.server.waitForFullProfileToBeCreated(FirefoxCustomProfileLauncher. Standard Edition (build 1. The “selenium-server” directory doesn’t exist on google. java -version You should see a message showing the Java version... Selenium-RC 0.minor version 49..idc. mixed mode) If you see a lower version number..5 or higher.14. When in doubt. run this from the command line.0)” while starting server This error says you’re not using a correct version of Java. Caused by: org. then the browser will be unable to connect to the Internet. This error can be intermittent. • SSL certificate warnings: Selenium RC automatically attempts to spoof SSL certificates when it is enabled as a proxy. or are no longer available (after the page has started to be unloaded).14. see the Proxy Configuration for more details. Troubleshooting Common Problems 81 . • *custom: When using *custom you must configure the proxy correctly(manually). 
To check whether you’ve configured the proxy correctly is to attempt to intentionally configure the browser incorrectly.Selenium Documentation. or the wrong port.g. but you may need to configure your 6. Often it is impossible to reproduce the problem with a debugger because the trouble stems from race conditions which are not reproducible when the debugger’s overhead is added to the system. • HTTP basic authentication dialogs: These dialogs prompt for a username/password to login to the site. If you’re encountering 404 errors and have followed this user guide carefully post your results to user group for some help from the user community. see more on this in the section on HTTPS.proxyHost”. Click on the “Connections” tab and click on “LAN Settings”.0 looking at your Internet Options control panel. or with *iehta browser launcher. – You may also try configuring your proxy manually and then launching the browser with *custom. This error can also occur when JavaScript attempts to find UI objects which are not yet available (before the page has completely loaded).com/blah/blah/blah“). You may need to know how to manage these. you’ll need to start Selenium Server with “-Dhttp. Release 1. If you had successfully configured the browser’s proxy settings incorrectly. Permission issues are covered in some detail in the tutorial. – If you need to use a proxy to access the application you want to test. you should never see SSL certificate warnings.. Each type of popup needs to be addressed differently. Try configuring the browser to use the wrong proxy server hostname. use a username and password in the URL. If your browser is configured correctly. This is most typically encountered with AJAX pages which are working with sections of a page or subframes that load and/or reload independently of the larger page. like this: open(“. and so ther are no known issues with this functionality. 
You may not be able to close these popups by running selenium commands if they are initiated by the browser and not your AUT. Each type of popup needs to be addressed differently. Release 1. If your browser is configured correctly. or with *iehta browser launcher. • HTTP basic authentication dialogs: These dialogs prompt for a username/password to login to the site. or the wrong port.g.. use a username and password in the URL. If you’re encountering 404 errors and have followed this user guide carefully post your results to user group for some help from the user community. This error can also occur when JavaScript attempts to find UI objects which are not yet available (before the page has completely loaded). Permission issues are covered in some detail in the tutorial. or are no longer available (after the page has started to be unloaded). you’ll need to start Selenium Server with “-Dhttp. Release 1. you should never see SSL certificate warnings. • You may also try configuring your proxy manually and then launching the browser with *custom. like this: open(“.com/blah/blah/blah“). This is most typically encountered with AJAX pages which are working with sections of a page or subframes that load and/or reload independently of the larger page. – If you need to use a proxy to access the application you want to test. you do want to run Selenium Server on a remote machine. If you had successfully configured the browser’s proxy settings incorrectly. and so there are no known issues with this functionality.
• modal JavaScript alert/confirmation/prompt dialogs: Selenium tries to conceal those dialogs from you (by replacing window.13 Problems With Verify Commands If you export your tests from Selenium-IDE. 0).c 6.e. so if you are using a previous version. you may find yourself getting empty verify strings from your tests (depending on the programming language used). Release 1.google. On most Linux distributions.onload() function runs)? No.15 Firefox on Linux On Unix/Linux. 6. If executing Firefox through a shell script. 6. make sure that the real executable is on the path. You can specify the path to firefox-bin directly.14.14.startup.prompt) so they won’t stop the execution of your page.page”. Selenium may not recognize windows loaded before the onload function. 0).confirm and window. Again.14.0 browser to trust our dangerous “CyberVillains” SSL certificate authority. Selenese contains commands for asserting or verifying alert and confirmation popups. before the parent page’s javascript window. so make sure that executable is on the path. window. it’s probably because it fired during the page load process..page”. Selenium-RC . 6. which is usually too early for us to protect the page. See the sections on these topics in Chapter 4. 6. the real firefox-bin is located on: 82 Chapter 6.startup. to add that path to the user’s path.x/ Where the x. you will have to add the following to your .x is the version number you currently have.0 /usr/lib/firefox-x. So.x/firefox-bin " 6. For example: //td[@style="background-color:yellow"] This would work perfectly in Firefox.x.17 Where can I Ask Questions that Aren’t Answered Here? Try our user group 6. So.x.x.14. you should use: //td[@style="BACKGROUND-COLOR:yellow"] This is a problem if your test is intended to work on multiple browsers. but you can easily code your test to detect the situation and try the alternative locator that only works in IE. Troubleshooting Common Problems 83 .x/" If necessary.x. 
Release 1.16 IE and Style Attributes If you are running your tests on Internet Explorer and you cannot locate elements using their style attribute.bashrc file: export PATH= "$PATH:/usr/lib/firefox-x.14. you can specify the path to firefox-bin directly in your test. Opera or Safari but not with IE.Selenium Documentation. 6. like this: " *firefox /usr/lib/firefox-x.14. IE interprets the keys in @style as uppercase. even if the source code is in lowercase. Release 1.0 84 Chapter 6.Selenium Documentation. Selenium-RC . We will define some terms here to help us categorize the types of testing typical for a web-application. If your page content is not likely to be affected then it may be more efficient to test page content manually. privacy policy. and whether to automate those tests or not. 85 . Once the project boundaries are defined though. a content test. 7. non-changing. UI element. • Does the application’s home page contain an image expected to be at the top of the page? • Does each page of the website contain a footer area with links to the company contact page.1 Introducing Test Design In this subsection we describe a few types of different tests you can do with Selenium.2. 7.1 Testing for Expected Content The simplest type of test. the tester.2 What to Test? What parts of your application should you test? That depends on aspects of your project: user expectations. is a simple test for the existence of a static.CHAPTER SEVEN TEST DESIGN CONSIDERATIONS NOTE: Some sections of this chapter are not yet complete. your application will be undergoing platform changes. For instance • Does each page have its expected page title? This can be used to verify your test found an expected page after following a link. 7. although the concepts we present here are typical for webapplication testing. We’ve created a few terms here of our own for the purposes of categorizing the types of test you may perform on your web application. 
does each page have the correct text within that header? You may or may not need content tests. This may not be new to you. will certainly make many decisions on what to test. priorities set by the project manager and so on. content tests may prove valuable. or files will likely be moved to different locations. you. If. and trademarks information? • Does each page begin with heading text using the <h1> tag? And. These terms are by no means standard. time allowed for the project. but we provide this as a framework for relating Selenium test automation to the decisions a quality assurance professional will make when deciding what tests to perform. the priority for each of those tests. however. Testing for these involves clicking each link and verifying the expected page behind that link loads correctly.3 Function Tests These would be tests of a specific function within your application.2 Testing Links A frequent source of errors for web-sites is broken links and missing pages behind those broken links. Should that go in this section or in a separate section? 7. for a particular search. drop-down lists. had a unique Identifier for each specific document. Dynamic names are usually on a result page of some given function. These names can also be dynamic . when you enter a search term into Google. Then. Test Design Considerations . etc. requiring some type of user input.generated fresh. and ‘q2’ on the next instance of the same page. In Ajax-driven web applications.2.Selenium Documentation. Often a function test will involve multiple pages with a formbased input page containing a collection of input fields. data is retrieved from the application server without refreshing the page. new. Its ID and name (addForm:_ID74:_ID75:0:_ID79:0:checkBox) are same and both are dynamic (they will change the next time you open the application). a name that can be use to locate the element. 
This means your test script could be looking at a different field name every time you run the script. Dynamic content involves UI elements whose identifying properties change each time you open the page displaying them. Suppose each data result. say for example a list of documents. had a unique Identifier for each specific document. So. for a particular search. the search results page returns a data set with one set of documents and their corresponding Identifiers. Then. in a different search. the search results page returns a different data set where each document in the result set uses different Identifiers. For example. when you enter a search term into Google. the text field in which you type your query is always named ‘q’. Dynamic names are usually on a result page of some given function. These names can also be dynamic - for example. some web servers might name a text entry field ‘q1’ on the first instance of a page. and ‘q2’ on the next instance of the same page. These names can be static - that is. and unique for every instance of a web page. An example will help. User input can be via text-input fields. check boxes. drop-down lists. or any other browser-supported input. Often a function test will involve multiple pages with a formbased input page containing a collection of input fields. Submit and Cancel operations. and one or more response pages. An example would be a result set of data returned to the user. 86 Chapter 7. In Ajax-driven web applications. data is retrieved from the application server without refreshing the page. 7.2.5 Ajax Tests Ajax is a technology which supports dynamic real-time UI elements such as animation and RSS feeds. Test Design Considerations . Release 1. in. Its ID and name (addForm:_ID74:_ID75:0:_ID79:0:checkBox) are same and both are dynamic (they will change the next time you open the application). This is HTML snippet for a check box. Dynamic HTML of an object might look as: <input type= "checkbox" value= "true" id= "addForm:_ID74:_ID75:0:_ID79:0: checkBox" name= "addForm:_ID74:_ID75:0:_ID79:0:checkBox" /> In this case 7.
that is what you want. but will pass the test when the programmers change the HTML used to present that text. or an image. Do you want your test to terminate or continue and record that the check failed? Here’s the trade-off. assume you still need to check that something is on the page. image. a specific image file. In contrast. or specific text.3. The advantage: you have an immediate visual of whether the checks (those using asserts anyway) passed. the test will stop at that point and not run any subsequent checks. or heading text. the text. Rather. Verify When should you use an assert command and when should you use a verify command? This is up to you. verifyTextPresent makes more sense. In this case you can use verify/assertElementPresent. Alternatively. If in doubt. This can fail if either the HTML element (tag) OR the text is not what your test is expecting. The difference is in what you want to happen when the check fails. If you use an assert. for instance if HTML changes frequently by your programmers. Release 1. Sometimes. If the test fails you will immediately know the test did not pass. verify commands will not terminate the test. there are other checks which were never performed. If your test uses only verify commands you are guaranteed (assuming no unexpected exceptions) the test will run to completion whether the checks find defects in the AUT or not. and if the requirements are not clear. Sometimes. the element and it’s text content? (verify/assertText) There is no right answer. you won’t get feedback from TestNG or JUnit. or other content is not to be checked. Most of these are easily changed in either Sel-IDE or Sel-RC. assertElementPresent. (verify/assertElementPresent) • Must I test both. that is. please refer to Chapter 4 first.3. Test engines such as TestNG and JUnit have plugins for commonly used development environments (Chap 5) which conveniently flag these tests as failed tests. You only care that some type of image exists. 
you will need to look at the results of a console printout or a log output by your test application. That is.3 Verifying Results 7.3. only the HTML tag is what is relevant. However. perhaps often. And you will need to take the time to look through this output every time you run your test. of course. It will ensure that a particular type of element exists (and if using XPath can ensure it exists relative to other objects within the page). perhaps your web-designers are frequently changing the page and you don’t want your test to fail every time they do this because the changes themselves are expected periodically. Realize that verify/assertText is the most specific test. It can check for the content. If not. and the mechanics of using them. you can go with your best guess and can always change the test later. The disadvantage: when a check does fail. It depends on the requirements for your test.Selenium Documentation. depend on the requirements for the application you’re testing.1 Assert vs. however you still need to open the logs and examine the results.2 Trade-offs: assertTextPresent. For Java. you will need to decide • Do I only check that the text exists on the page? (verify/assertTextPresent) • Do I only check that the HTML element exists on the page? That is.0 7. 7. The disadvantage: you have to do more work to examine your test results. this will be time-consuming. all the AUT page elements we have been considering have been static objects. we will contrast that with a static object. With ID and name locators. and XPath appears to the preferred choice. They are easy concepts. If you must test that an image displaying the company logo appears at the top of the page within a header section XPath may be the better locator. For others. you must understand what a dynamic object is. With XPath and DOM you can locate an object with respect to another object on the page. you can use XPath or DOM to specify this. 
using an ID locator is more efficient as it makes your test code more readable." href= "#" >View Archived Allocation Events</a> This is HTML anchor tag defining an button with an ID attribute of “adminHomeForm”. Its ID remains constant within all instances of this page. 7. you can only specify that they occur on the page– somewhere on the page. Sometimes though. For example. These also give the best performance. Release 1. So. possibly because XPath provides a rich set of possibilities for Identifying an object–it is quite flexible. For example.1 Locating Dynamic Objects First. That is. ’adminHomeForm:_ID38’). 88 Chapter 7.0 Getting a feel for these types of decisions will come with time and a little experience. when this page is displayed. you must use an XPath locator. These are objects who’s html page source is the same each time the page is loaded in the browser. For some projects the requirements are clear and therefore your tests will be clear. but they do depend do depend on the requirements of your AUT. But what are the trade offs of each of these locator types? Recall we can locate an object using • the element ID • the element name attribute • an XPath statement • document object model (DOM) Generally. but it is still a static tag. Using the name attribute also has similar advantages. assuming the ID used by the AUT’s page source is a meaningful one. It’s a fairly complex anchor tag when compared to most HTML tags. The HTML will be the same each time this page is loaded in the browser. and to do so. It appears at the time of writing that DOM locators are not commonly used now. if there is a link that must occur within the second paragraph within a <div> section. <a class= "button" id= "adminHomeForm" onclick= "return oamSubmitForm(’adminHomeForm’. Until now. 
The purpose of this subsection is to help you anticipate your needs so you can make these decisions more efficiently.4 Choosing a Location Strategy You know from the Selenese section there are multiple ways of selecting an object on a page. and easy to change in your test. and you will have to give it your best guess. There is an advantage to using XPath or DOM that locating via ID or name attributes do not have. for your test script to click this button you simply need to use the following selenium command. not so much. this UI element will always have this Identifier.4. XPath statements have been known to be slow in Internet Explorer due to limitations of IE’s XPath processor. Test Design Considerations .Selenium Documentation. 7. If the page source does not have an ID or name attribute you have no choice but to use an XPath or DOM locator. you do need to use the ID to locate the element. a simple solution would be to just use an XPath locator rather than trying to use an ID locator.Selenium Documentation. Choosing a Location Strategy 89 . again in Selenium-RC selenium.click( "adminHomeForm" ). if it is not the first input element on the page (which it likely is not) try a more detailed XPath statement. Given the dynamically generated Identifier. this approach would not work. a programmed solution is required. before you need to use it in a Selenium command. may generate HTML with IDs that are generated dynamically and therefore the ID itself varies on different instances of the webpage under test. The next time this page is loaded the Identifier will be a different value from the one used in the Selenium command and therefore. HTML for a dynamic page element might look like this. String[] checkboxids = selenium. click //input[3] Or click //div/p[2]/input[3] If however. Its ID and name attributes (both addForm:_ID74:_ID75:0:_ID79:0:checkBox) are dynamically generated values. Another solution is to capture this ID from the website itself. 
using a standard locator would look something like the following. In this case. // Collect all input IDs on page. So. if(!GenericValidator. 7.indexOf( "addForm" ) > -1) { selenium. { // If the ID starts with addForm if(checkboxids[i].click("addForm:_ID74:_ID75:0:_ID79:0:checkBox).4. For instance. however. The click operation will fail with an “element not found” error. Release 1.IsBlankOrNull(checkboxids[i])) // If collected ID is not null.0 click adminHomeForm Or. click addForm:_ID74:_ID75:0:_ID79:0:checkBox Or.click(checkboxids[i]). To correct this. It can be done like this. in Selenium-RC selenium.getAllFields(). Your application. for the checkbox you can simply use click //input Or. will not be found. <input type= "checkbox" value= "true" id= "addForm:_ID74:_ID75:0:_ID79:0:checkBox" name= "addForm:_ID74:_ID75:0:_ID79:0:checkBox" /> This defines a checkbox. a page with two links having the same name and the same html name. String editInfo = null. Clicking on the second link can be achieved as follows. } // Set the second appearance of Autumn term link to true as isSecondInstanceLink = true. a non-descriptive element id makes it hard for automation testers to keep track of and determine which element IDs to use (which ID is associated with which GUI object).Selenium Documentation. String editTermSectionInfo = selenium. break. // Collect all links. 90 Chapter 7.click(editInfo).isBlankOrNull(linkID)) { // Find the inner HTML of link.0 } } This approach will work if there is only one field whose ID has the text ‘addForm’ appended to it. Now if href is used to click the link. different element IDs could be generated.getAllLinks().document.e.getElementByID(’" +linkID+ "’). element IDs should be explicitly created by the application designer. // Flag for second appearance of link. Second.equalsIgnoreCase( "expectedlink" )) { // If it is second appearance of link then save the link ID and break the loop. // If retrieved link is expected link. 
each time the application is deployed. not automatically generated. String[] links = selenium. // Desired link. Automatically generated (non-descriptive) element IDs (i. if(editTermSectionInfo.getEval ( "window. if(isSecondInstanceLink) { editInfo = linkID. boolean isSecondInstanceLink = false. selenium. for(String linkID: links) { // If retrieved link is not null if(!GenericValidator.4. id_147) tend to cause two problems: first. 7. If the element ID changes then your test will fail. // Loop through collected links. Consider one more example of a Dynamic object. } } } // Click on link. Release 1.innerHTML" ).2 How can I avoid using complex XPath expressions in my test? Where possible. Test Design Considerations . it would always be clicking on the first element. Release 1. search) sel.2 Recovering From Failure A quick note though–recognize that your programming language’s exception.sqlserver. // Get connection to DB.0 sel.168. generally handle this as it’s a common reason for test automation. 7. Test automation tools. why not use them for some data validations/retrieval on the Application Under Test? Consider the example of a registration process where a registered email address is to be retrieved from the database. String url = "jdbc:sqlserver://192.3 Database Validations Since you can also do database queries from your favorite programming language. 7. 94 Chapter 7. Refer to the Selenium RC wiki for examples of reading data from spread sheet or using data provider capabilities of TestNG with java client driver.DatabaseName=TEST_DB" .8 Handling Errors Note: This section is not yet developed.1 Error Reporting 7. This file contains a different search string on each line.8. but the Idea is to show you things that can easily be done with either a programming or scripting language when they’re difficult or even impossible to do using Selenium-IDE.8.SQLServerDriver" ).click( " btnG " ) sel. This is called Data Driven Testing and is a very common testing task. 
The Python script above opens a text file.handling support can be used for error handling and recovery. // Prepare connection url.forName( "com. and iterates over the array doing a search and assert on each string.microsoft.8. Class.1. An example of establishing a DB connection and retrieving data from the DB would be: In Java: // Load Microsoft SQL Server JDBC driver. 7.180:1433. This section has not been developed yet.is_text_present( " Results * for " + search)) Why would we want a separate file with data in it for our tests? One important method of testing is to run the same test repetitively with different data values.type( " q " .jdbc. Selenium included. The code then saves this in an array of strings.waitForPageToLoad( " 30000 " ) self. This is a very basic example of what you can do.failUnless(sel.Selenium Documentation. Test Design Considerations . assuming you have database support functions. String emailaddress = result. public static Statement stmt = con. This is a very simple example of data retrieval from a DB in Java. 7.type( "userID" . // Fetch value of "email_address" from "result" object. "password" ).getConnection(url.executeQuery // method which returns the requested information as rows of data in a // ResultSet object. ResultSet result = stmt. Handling Errors 95 .0 public static Connection con = DriverManager. Release 1.8. // Use the fetched value to login to application. selenium.executeQuery ( "select top 1 email_address from user_register_table" ).createStatement().Selenium Documentation. // Send SQL SELECT statements to the database via the Statement. "username" . emailaddress). This wouldn’t take too much work from what you’ve already seen. // Create statement object which would be used in writing DDL and DML // SQL statement. A more complex test could be to validate that inactive users are not able to login to the application.getString( "email_address" ). Release 1.Selenium Documentation. Test Design Considerations .0 96 Chapter 7. 
CHAPTER EIGHT

SELENIUM-GRID

This section is not yet developed. Please refer to the Selenium Grid website:
http://selenium-grid.seleniumhq.org/how_it_works.html

If there is a member of the community who is experienced in Selenium-Grid, and would like to contribute, please contact the Documentation Team. We would love to have you contribute.

[pages 97–98]

CHAPTER NINE

USER-EXTENSIONS

NOTE: This section is close to completion, but it has not been reviewed and edited.

9.1 Introduction

It can be quite simple to extend Selenium, adding your own actions, assertions and locator-strategies. This is done with JavaScript by adding methods to the Selenium object prototype, and the PageBot object prototype. On startup, Selenium will automatically look through methods on these prototypes, using name patterns to recognize which ones are actions, assertions and locators. The following examples give an indication of how Selenium can be extended with JavaScript.

9.2 Actions

All methods on the Selenium prototype beginning with "do" are added as actions. For each action foo there is also an action fooAndWait registered. An action method can take up to two parameters, which will be passed the second and third column values in the test.

Example: Add a "typeRepeated" action to Selenium, which types the text twice into a text box.

    Selenium.prototype.doTypeRepeated = function(locator, text) {
        // All locator-strategies are automatically handled by "findElement"
        var element = this.page().findElement(locator);
        // Create the text to type
        var valueToType = text + text;
        // Replace the element text with the new text
        this.page().replaceText(element, valueToType);
    };

9.3 Accessors/Assertions

All getFoo and isFoo methods on the Selenium prototype are added as accessors (storeFoo). For each accessor there is an assertFoo, verifyFoo and waitForFoo registered. You can also define your own assertions literally as simple "assert" methods, which will also auto-generate "verify" and "waitFor" commands. An assert method can take up to 2 parameters, which will be passed the second and third column values in the test.

Example: Add a valueRepeated assertion, that makes sure that the element value consists of the supplied text repeated.

[pages 99–102: the remainder of sections 9.3 through the start of 9.6 is missing from this extraction]

9.6 Using User-Extensions With Selenium RC (C# example, continued)

    [TearDown]
    public void TeardownTest()
    {
        try
        {
            selenium.Stop();
        }
        catch (Exception)
        {
            // Ignore errors if unable to close the browser
        }
        Assert.AreEqual("", verificationErrors.ToString());
    }

    [Test]
    public void TheNewTest()
    {
        selenium.Open("/");
        string[] inputParams = { "Hello World" };
        proc.DoCommand("alertWrapper", inputParams);
    }

[pages 103–104]

CHAPTER TEN

SELENIUM 2.0 AND WEBDRIVER

The Selenium developers are working towards a Selenium 2.0 release. The primary new feature will be the integration of the WebDriver API into Selenium-RC. This will address a number of Selenium 1.0 limitations along with providing an alternative programming interface. The goal is to develop a standardized Selenium API that provides additional support for a larger number of browsers along with improved support for advanced web-app testing problems.

10.1 What is WebDriver?

WebDriver uses a different underlying framework from Selenium's javascript Selenium-Core. It also provides an alternative API with functionality not supported in Selenium-RC. WebDriver does not depend on a javascript core embedded within the browser, therefore it is able to avoid some long-running Selenium limitations.

WebDriver's goal is to provide an API that establishes:

• A well-designed standard programming interface for web-app testing.
• Improved consistency between browsers.
• Additional functionality addressing testing problems not well-supported in Selenium 1.0.

The Selenium developers strive to continuously improve Selenium. Integrating WebDriver is another step in that process. The developers of Selenium and of WebDriver felt they could make significant gains for the Open Source test automation community by combining forces and merging their ideas and technologies. Integrating WebDriver into Selenium is the current result of those efforts.

The best explanation for why WebDriver and Selenium are merging was detailed by Simon Stewart, the creator of WebDriver, in a joint email to the WebDriver and Selenium community on August 6, 2009:

"Why are the projects merging? Partly because webdriver addresses some shortcomings in selenium (by being able to bypass the JS sandbox. And we've got a gorgeous API), partly because selenium addresses some shortcomings in webdriver (such as supporting a broader range of browsers) and partly because the main selenium contributors and I felt that it was the best way to offer users the best possible framework."

10.2 When to Use WebDriver?

One should use WebDriver when requiring improved support for
[page 105; the list of supported scenarios continues on the next page]
// Enter something to search for 106 Chapter 10. 10.Selenium Documentation. It’s not tied to any particular test framework. Release 1. you won’t see a new browser window open.com" ). popups.selenium.0.selenium. which searches for the term “Cheese” on Google and then outputs the result page’s title to the console. package org. which will help make your tests easier to read and maintain.openqa. public class HtmlUnitExample { public static void main(String[] args) { // Create a new instance of the html unit driver // Notice that the remainder of the code relies on the interface.google. This is a pure Java driver that runs entirely in-memory. and you usually don’t need to remember to start any additional processes or run any installers before using it. and alerts.example.WebDriver. Selenium 2.3 The 5 Minute Getting Started Guide WebDriver is a tool for automating testing web applications. and in particular to verify that they work as expected. // And now use this to visit Google driver. WebDriver driver = new HtmlUnitDriver(). Because of this. Now. • Handling multiple frames.openqa.openqa. org.openqa. Start by Downloading the latest binaries and unpack them into a directory.selenium. You’re now ready to write some code. You’ll start by using the HtmlUnit Driver. An easy way to get started is this example.get( " and WebDriver . org.0 • Mult-browser testing including improved functionality for browsers not well-supported by Selenium-1. Selenium. Make sure that Firefox is installed on your machine and is in the normal location for your OS.example.Selenium.openqa. } } HtmlUnit isn’t confined to just Java. element.Remote. //The rest of the code should look very similar to the Java library IWebElement element = driver.Net driver.Console.sendKeys( "Cheese!" ). class Example { static void Main(string[] args) { //to use HtmlUnit from . Below is the same example in C#.ReadLine().ca/" ).Selenium Documentation. you shall use a page that requires Javascript to work properly. 
//the . which looks like: 10.Name( "q" )).0b1.FindElement(By. System.selenium. Once that’s done.Submit().Console.util.submit(). You should see a line with the title of the Google search results as output on the console. The 5 Minute Getting Started Guide 107 . element.Net we must access it through the RemoteWebDriver //Download and run the selenium-server-standalone-2. IWebDriver driver = new RemoteWebDriver(desiredCapabilities). Note that you’ll need to run the remote WebDriver server to use HtmlUnit from C# using OpenQA.getTitle()).1 Java package org. driver. 10.HtmlUnit(). } } Compile and run this. You will also be using the Firefox Driver.GoToUrl( ". Selenium makes accessing HtmlUnit easy from any language.3.0 element.WriteLine( "Page title is: " + driver. System. you’ve managed to get started with WebDriver! In this next example. Release 1. Congratulations.SendKeys( "Cheese!" ).Title). // Now submit the form.3. // Check the title of the page System. create a new class called GoogleSuggest.jar locally to run this ICapabilities desiredCapabilities = DesiredCapabilities.List. using OpenQA.Quit().Net Webdriver relies on a slightly different API to navigate to //web pages because ’get’ is a keyword in .Navigate(). WebDriver will find the form for us from the element element.out. import java. such as Google Suggest.println( "Page title is: " + driver. Release 1. } } } 10.Selenium Documentation.selenium.get( "(By.openqa.isDisplayed()) { break.google.currentTimeMillis() + 5000. using OpenQA. if (resultsDiv. using System. query. 108 Chapter 10.selenium. public class GoogleSuggest { public static void main(String[] args) throws Exception { // The Firefox driver supports javascript WebDriver driver = new FirefoxDriver(). class GoogleSuggest { static void Main(string[] args) { IWebDriver driver = new FirefoxDriver().selenium.findElement(By.3.2 C# using OpenQA.Firefox. 
} } // And now list the suggestions List<WebElement> allSuggestions = driver.com/webhp?complete=1&hl=en" ).name( "q" )). org.out.google.Collections. // Go to the Google Suggest home page driver.ObjectModel.GoToUrl( ". // the results are displayed in a drop down. org.0 and WebDriver .RenderedWebElement.Navigate().sendKeys( "Cheese" ).WebDriver. // Sleep until the div we want is visible or 5 seconds is over long end = System.WebElement.findElement(By.println(suggestion.xpath( "//td[@class=’gac for (WebElement suggestion : allSuggestions) { System.c // If results have been returned. org.openqa.By.openqa. while (System.FirefoxDriver. // Enter the query string "Cheese" WebElement query = driver.getText()).selenium.openqa. Selenium 2.currentTimeMillis() < end) { // Browsers which render content (such as Firefox and IE) // return "RenderedWebElements" RenderedWebElement resultsDiv = (RenderedWebElement) driver.com/webhp?complete=1&hl=en" ). driver. org.openqa.0 import import import import import org.firefox. this idea is referred to as “safety”. In the Next Steps section you will learn more about how to use WebDriver for things such as navigating forward and backward in your browser’s history. let’s take the Next Steps! 10.ie. and so can be used to detect information such as the position of an element on a page. This can be measured and quantified. and will depend on their familiarity with the application under test. 0. This varies from person to person.FindElement(By. If you’re ready. query.4.Quit(). but sometimes it’s good to be able to test using a real browser. and your testing framework. This has the added advantage that this driver actually renders content to a screen. Which you use depends on what you want to do. you’ll see the list of suggestions being printed to the console. 5)). Secondly.ChromeDriver You can find out more information about each of these by following the links in the table. 
and how to use frames and windows.openqa.FindElements(By.chrome.SendKeys( "Cheese" ). Release 1. Often.0 IWebElement query = driver. in this case 5 seconds. Next Steps For Using WebDriver 109 .Console. These are: Name of driver HtmlUnit Driver Firefox Driver Internet Explorer Driver Chrome Driver Available on which OS? All All Windows All Class to instantiate org.selenium.selenium. there’s “perceived safety”.selenium. This means the driver won’t //throw an error if the suggestion box isn’t there.ReadLine(). this will have whet your appetite for more.Console. driver. To support higher “perceived safety”. and it falls into two parts. Instead of telling the //executing thread to sleep we use an implicit wait.FirefoxDriver org.4. which refers to whether or not the tests work as they should.openqa. } } When you run this program.ImplicitlyWait(new TimeSpan(0. //This line is different than the Java version above.firefox.1 Which Implementation of WebDriver Should I Use? WebDriver is the name of the key interface against which tests should be written. but it’s not graphical. the HtmlUnit Driver is great. For sheer speed. which refers to whether or not an observer believes the tests work as they should. there’s “actual safety”. } System.htmlunit.openqa. That’s all there is to using WebDriver! Hopefully. but there are several implementations. Instead it will poll the web //the element is present of the timeout expires.XPath( "/ foreach (IWebElement suggestion in allSuggestions) { System.Timeouts(). driver.selenium. WebDriver.WriteLine(suggestion.InternetExplorerDriver org.Text). It also provides a more complete discussion of the examples than The 5 Minute Getting Started Guide.4 Next Steps For Using WebDriver 10. you may wish to choose a driver such as the Firefox Driver.Name( "q" )).openqa.Manage().HtmlUnitDriver org. especially when you’re showing a demo of your application (or running the tests) for an audience. which means that you can’t watch what’s happening. 
ReadOnlyCollection<IWebElement> allSuggestions = driver. or the CSS properties that apply to 10. As a developer you may be comfortable with this.Selenium Documentation. Firstly. or.com" ).sendKeys( "some text" ). not all of them will make sense or be valid. then only the first will be returned. Release 1.get( ". WebDriver will wait until the page has fully loaded (that is. It’s worth noting that if your page uses a lot of AJAX on load then WebDriver may not know when it has completely loaded. it is possible to pick the most appropriate driver for a given test. You can also look for a link by its text. element = driver. let’s start with the HtmlUnit Driver: WebDriver driver = new HtmlUnitDriver(). we represent all types of elements using the same interface: Web Element.name( "passwd" )). Don’t worry! WebDriver will attempt to do the Right Thing. 10. but be careful! The text must be an exact match! You should also be careful when using XPATH in WebDriver. for example) an exception will be thrown. The normal way to do this is by calling “get”: driver. the HTML elements within a page. given an element defined as: <input type= "text" name= "passwd" id= "passwd-id" /> you could find it using any of: WebElement element.4. If you need to ensure such pages are fully loaded then you can use “waits”.findElement(By.0 it.Selenium Documentation. So. What can you do with it? First of all. the “onload” event has fired) before returning control to your test or script.xpath( "//input[@id=’passwd-id’]" )). more specifically. WebDriver has an “Object-based” API. we need to find one.google. By writing your tests against the WebDriver interface. However. WebDriver offers a number of ways of finding elements. 10.2 Navigating The first thing you’ll want to do with WebDriver is navigate to a page. and if you call a method that makes no sense (“setSelected()” on a “meta” tag.0 and WebDriver . element = driver.4. For example. What we’d really like to do is to interact with the pages. 
This means that although you may see a lot of possible methods you could invoke when you hit your IDE’s auto-complete key combination. You can simulate pressing the arrow keys by using the “Keys” class: 110 Chapter 10.3 Interacting With the Page Just being able to go to places isn’t terribly useful. If nothing can be found. you’ve got an element. a NoSuchElementException will be thrown.id( "passwd-id" )). Selenium 2. If there’s more than one element that matches the query. this additional flexibility comes at the cost of slower overall speed. you may want to enter some text into a text field: element. element = driver. To keep things simple.findElement(By.findElement(By. First of all. findElement(By.4 Filling In Forms We’ve already seen how to enter text into a textarea or text field.findElement(By. which makes it possible to test keyboard shortcuts such as those used on GMail. // Assume the button has the ID "submit" :) Alternatively. and cycle through each of it’s OPTIONs in turn. and selecting each in turn.id( "submit" )). Once you’ve finished filling out the form. If the element isn’t in a form. Release 1. A side-effect of this is that typing something into a text field won’t automatically clear it. WebDriver’s support classes include one called “Select”. then the NoSuchElementException will be thrown: element.selectByVisibleText( "Edam" ).tagName( "option" )). what you type will be appended to what’s already there. which provides useful methods for interacting with these. Keys. WebDriver will walk up the DOM until it finds the enclosing form and then calls submit on that.clear(). Select select = new Select(driver.findElement(By.setSelected().out. WebDriver has the convenience method “submit” on every element. Next Steps For Using WebDriver 111 . and then select the OPTION with the displayed text of “Edam”.xpath( "//select" ))). for (WebElement option : allOptions) { System. As you can see. select. option. Instead. 
and you can use “setSelected” to set something like an OPTION tag selected. } This will find the first “SELECT” element on the page. option.sendKeys( " and some" . 10. select.ARROW_DOWN).0 element. printing out their values. If you call this on an element within a form. One way to do this would be to find the “submit” button and click it: driver.getValue())).println(String.Selenium Documentation. but what about the other elements? You can “toggle” the state of checkboxes.4. It is possible to call sendKeys on any element. Dealing with SELECT tags isn’t too bad: WebElement select = driver.click().xpath( "//select" )).deselectAll().format( "Value is: %s" . you probably want to submit it.submit(). this isn’t the most efficient way of dealing with SELECT elements.4.findElements(By. List<WebElement> allOptions = select. 10. This will deselect all OPTIONs from the first SELECT on the page. You can easily clear the contents of a text field or textarea: element. 10.0.switchTo(). All frames are evaluated as if from *top*.window()” method. and you can specify the frame by its index too. Knowing this.name( "q" )).name( "source" )) RenderedWebElement target = (RenderedWebElement) driver. Not all drivers render their content to the screen (such as the HtmlUnit Driver).4. } You can also swing from frame to frame (or into iframes): driver.child" ). RenderedWebElement element = (RenderedWebElement) element. perhaps to see if it’s visible or where it is on screen. But how do you know the window’s name? Take a look at the javascript or link that opened it: <a href= "somewhere. In addition. or on to another element: RenderedWebElement element = (RenderedWebElement) driver. 112 Chapter 10.getWindowHandles()) { driver. would go to the frame named “child” of the first subframe of the frame called “frameName”. That is: driver.name( "target" )). Selenium 2.findElement(By. 
but if it does you can gather additional information such as the size and location of the element.Selenium Documentation.0 and WebDriver . WebDriver supports moving between named windows using the “switchTo” method: driver. You can find out this information by casting the element to a RenderedWebElement: WebElement plain = driver. All calls to driver will now be interpreted as being directed to the particular window.findElement(By. either moving an element by a certain amount. element. Release 1.6 Moving Between Windows and Frames It’s rare for a modern web application not to have any frames or to be constrained to a single window.switchTo().frame( "frameName.switchTo().switchTo().0 10.findElement(By.window(handle). so it’s not safe to assume that the cast will work. you can use drag and drop. it’s possible to iterate over every open window like so: for (String handle : driver.4. you can pass a “window handle” to the “switchTo().frame( "frameName" ).window( "windowName" ). It’s possible to access subframes by separating the path with a dot.dragAndDropOn(target).html" target= "windowName" >Click here to open a new window</a> Alternatively.5 Getting Visual Information And Drag And Drop Sometimes you want to extract some visual information out of an element. // And now output all the available cookies for the current URL Set<Cookie> allCookies = driver.manage().println(String.alert().out.switchTo(). the method to do this lives on the main WebDriver interface. but it’s simply a synonym to: driver. This will return the currently open alert object. loadedCookie. This one’s valid for the entire domain Cookie cookie = new Cookie( "key" .g } 10. Refer to the JavaDocs for more information. 10. you need to be on the domain that the cookie will be valid for: // Go to the correct domain driver.7 Popup Dialogs Starting with Selenium 2. read it’s contents or even type into a prompt. for (Cookie loadedCookie : allCookies) { System. 
10.8 Navigation: History and Location Earlier.navigate(). Because loading a page is such a fundamental requirement.to()” and “get()” do exactly the same thing. First of all.Selenium Documentation.4.com")) As you’ve seen.4.com" ). we covered navigating to a page using the “get” command ( driver. dismiss.9 Cookies Before we leave these next steps. task-focused interfaces. you may be interested in understanding how to use cookies. This interface works equally well on alerts. "value" ). driver. To reiterate: “navigate().back().forward().example. confirms.getName(). // Now set the cookie.0 10. you can access the alert with the following: Alert alert = driver.example. and navigation is a useful task.addCookie(cookie). It’s just possible that something unexpected may happen when you call these methods if you’re used to the behaviour of one browser over another. loadedCookie.getCookies(). Release 1.format( "%s -> %s" .to( ". there is built in support for handling popup dialog boxes. Please be aware that this functionality depends entirely on the underlying browser.manage(). After you’ve triggerd and action that would open a popup. One’s just a lot easier to type than the other! The “navigate” interface also exposes the ability to move backwards and forwards in your browser’s history: driver.navigate().4.4.get( "" ). driver. WebDriver has a number of smaller.0 beta 1. prompts.example. With this object you can now accept.navigate().get(". Next Steps For Using WebDriver 113 . 10. we reassess this decision: we hope to enable JavaScript by default on the HtmlUnit at some point. but it is no different from any other browser: it has its own quirks and differences from both the W3C standard and the DOM implementations of the major browsers. we had to make a choice. Pros • Fastest implementation of WebDriver • A pure Java solution and so it is platform independent. 114 Chapter 10. 
If you test JavaScript using HtmlUnit the results may differ significantly from those browsers.1 HtmlUnit Driver This is currently the fastest and most lightweight implementation of WebDriver. When we say “JavaScript” we actually mean “JavaScript and the DOM”.0 10. Selenium 2. You may want to look at the Test Design Considerations chapter to get some ideas about how you can reduce the pain of maintaining your tests and how to make your code more modular. and by default have disabled support when we use HtmlUnit. this is based on HtmlUnit.Selenium Documentation. or do we leave JavaScript disabled.5. With each release of both WebDriver and HtmlUnit.10 Next.0 and WebDriver .5 WebDriver Implementations 10.setJavascriptEnabled(true).4. knowing that there are more and more sites that rely on JavaScript? We took the conservative approach. • Supports JavaScript Cons • Emulates other browser’s JavaScript behaviour (see below) JavaScript in the HtmlUnit Driver None of the popular browsers uses the JavaScript engine used by HtmlUnit (Rhino). despite its ability to mimic other browsers. driver. HtmlUnit has an impressively complete implementation of the DOM and has good support for using JavaScript. Enabling JavaScript If you can’t wait. do we enable HtmlUnit’s JavaScript capabilities and run the risk of teams running into problems that only manifest themselves there. Release 1. As the name suggests. Although the DOM is defined by the W3C each browser out there has its own quirks and differences in their implementation of the DOM and in how JavaScript interacts with it. Next Steps! This has been a high level walkthrough of WebDriver and some of its key capabilities. enabling JavaScript support is very easy: HtmlUnitDriver driver = new HtmlUnitDriver(). With WebDriver. jar on your CLASSPATH. Release 1. carry on reading! 
Important System Properties The following system properties (read using System.2 Firefox Driver Pros • Runs in a real browser and supports JavaScript • Faster than the Internet Explorer Driver Cons • Slower than the HtmlUnit Driver Before Going Any Further The Firefox Driver contains everything it needs in the JAR file.setProperty() in Java code or the -DpropertyName=value command line flag) are used by the Firefox Driver: Property What it means webThe location of the binary used to control Firefox.zip” which may be downloaded from the website.exe Windows Vista \Program Files (x86)\Mozilla Firefox\firefox.exe By default. If you’re just interested in using this driver. though. This defaults to driver.getProperty() and set using System. and WebDriver will do everything else for you.firefox.profile WebDriver creating an anonymous profile webShould be “true” if temporary files and profiles should not be deleted driver. WebDriver Implementations 115 .5. 10. then all you need to do is put the webdriver-firefox. If you want to dig deeper.firefox.bin webThe name of the profile to use when starting Firefox. the Firefox driver creates an anonymous profile Installing a Downloaded Binary The “wedriver-all.Selenium Documentation.0 This will cause the HtmlUnit Driver to emulate Internet Explorer’s JavaScript handling by default.app/Contents/MacOS/firefox Windows XP %PROGRAMFILES%\Mozilla Firefox\firefox. 10.reap_profile Normally the Firefox binary is assumed to be in the default location for your particular operating system: OS Expected Location of Firefox Linux firefox (found using “which”) Mac /Applications/Firefox.jar or webdriver-all. driver.5. In order to use it: • Copy all the “jar” files on to your CLASSPATH. contains all the dependencies (including the common library) required to run the Firefox Driver. 4 Chrome Driver See below for instructions on how to install the Chrome Driver.3 Internet Explorer Driver This driver has been tested with Internet Explorer 6.0 10.5. 
JavaScript execution may differ. Release 1. then you will not be able to interact with the page. You do not need to run an installer before using the Internet Explorer Driver.jar on your CLASSPATH. and WebDriver will do everything else for you.5. 7 and 8 on XP. Pros • Runs in a real browser and supports JavaScript • Because Chrome is a Webkit-based browser. the Chrome Driver may allow you to verify that your site works in Safari.Selenium Documentation. Selenium 2. 116 Chapter 10. Pros • Runs in a real browser and supports JavaScript Cons • Obviously the Internet Explorer Driver will only work on Windows! • Comparatively slow (though still pretty snappy :) Installing Simply add webdriver-all. though some configuration is required. If you’re just interested in using this driver. 10. Cons • Slower than the HtmlUnit Driver Before Going Any Further The Chrome Driver contains everything it needs in the JAR file. It has also been successfully tested on Vista. Required Configuration Add every site you intend to visit to your “Trusted Sites” If you do not do this. then all you need to do is put webdriver-all. Note that since Chrome uses its own V8 JavaScript engine rather than Safari’s Nitro engine. Note that Chrome Driver is one of the newest drivers. Please report any problems through the issue tracker.jar to your CLASSPATH.0 and WebDriver . exe Installing a Downloaded Binary The “wedriver-all.click( "name=btnG" ). // Perform actions with selenium selenium.reap_profile What it means The location of the binary used to control Chrome.. Release 1.bin webdriver. the JVM will continue running after 10. // Get the underlying WebDriver implementation back.zip” which may be downloaded from the website.com" .quit().google.getUnderlyingWebDriver() //Finally. Otherwise.chrome. 10. Call stop on the WebDriverBackedSelenium instance //instead of calling driver. Important System Properties The following system properties (read using System. 
It is used like so: // You may use any WebDriver implementation.getProperty() and set using System.type( "name=q" . selenium. // Create the Selenium implementation Selenium selenium = new WebDriverBackedSelenium(driver.setProperty() in Java code or the -DpropertyName=value command line flag) are used by the Chrome Driver: Property webdriver. "cheese" ).0 The Chrome Driver_ works with Google Chrome version 4.app/Contents/MacOS/GoogleChrome or /User/:username/:as_to_the_left %HOMEPATH%\Local Settings\Application Data\Google\Chrome\Application\chrome. used by selenium to resolve relative URLs String baseUrl = " Emulating Selenium RC The Java version of WebDriver provides an implementation of the Selenium RC API. copy all the “jar” files on to your CLASSPATH. Emulating Selenium RC 117 .6.0 and above. Firefox is used here as an example WebDriver driver = new FirefoxDriver(). This will refer to the // same WebDriver instance as the "driver" variable above.google. // A "base url". contains all the dependencies required to run the Chrome Driver.com" ). In order to use it.exe C:\Users%USERNAME%\AppData\Local\Google\Chrome\Application\chrome. selenium. baseUrl).open( ". WebDriver driverInstance = ((WebDriverBackedSelenium) selenium).Selenium Documentation. close the browser. only the Firefox Driver supports this. RenderedWebElement to = (RenderedWebElement) driver. but you should also expect support for the Internet Explorer Driver too.findElement(By.stop().6. CommandExecutor executor = new SeleneseCommandExecutor( "http:localhost:4444/" .Selenium Documentation. selenium. 10. because we’re using Selenium Core for the heavy lifting of driving the browser. so in order to provide that support while still using the WebDriver API. 
but if you’re using a browser that supports it you can cast a WebElement to RenderedWebElement and then it’s easy to do drag and drop: // Note the casts RenderedWebElement from = (RenderedWebElement) driver.2 Cons • Does not implement every method • More advanced Selenium usage (using “browserbot” or other built-in JavaScript methods from Selenium Core) may not work • Some methods may be slower due to underlying implementation differences 10.6.id( "one" )).6. 10. Currently.0 and WebDriver . notably that findElements doesn’t work as expected.3 Backing WebDriver with Selenium WebDriver doesn’t support as many browsers as Selenium RC does.1 Using Drag and Drop It may not be immediately obvious. capabilities).id( "two" )). Selenium 2. you can make use of the SeleneseCommandExecutor It is done like this: Capabilities capabilities = new DesiredCapabilities() capabilities. "http:// WebDriver driver = new RemoteWebDriver(executor.findElement(By.setBrowserName( "safari" ).7.dragAndDropOn(to). Release 1.0 //the browser has been closed. Also. you are limited by the JavaScript sandbox. There are currently some major limitations with this approach. from.7 Tips and Tricks 10. 118 Chapter 10. Assuming that the profile has been created using Firefox’s profile manager (firefox -ProfileManager): ProfileIni allProfiles = new ProfilesIni().8.addAdditionalPreference( "general. This can lead to some unexpected behaviour unless you are aware of the differences in the various xpath engines. For example. WebDriver uses a browser’s native XPath capabilities wherever possible. profile. they are disabled by default. so for the following piece of HTML: 10. Release 1.7. 10. until we feel native events are stable on Firefox for Linux.useragent. WebDriver driver = new FirefoxDriver(profile). How XPATH Works in WebDriver 119 . To enable them: FirefoxProfile profile = new FirefoxProfile().getProfile( "WebDriver" ). 
Enabling features that might not be wise to use in Firefox As we develop features in the Firefox Driver.Selenium Documentation. 23). if the profile isn’t already registered with Firefox: File profileDir = new File( "path/to/top/level/of/profile" ). FirefoxProfile profile = new FirefoxProfile(profileDir).setPreferences( "foo.7.addAdditionalPreferences(extraPrefs).2 Changing the user agent This is easy with the Firefox Driver: FirefoxProfile profile = new FirefoxProfile(). 10.override" .0 10. "some UA string" ). WebDriver driver = new FirefoxDriver(profile). but you’ve got a tricked out Firefox profile that contains dozens of useful extensions. profile.8 How XPATH Works in WebDriver At a high level. WebDriver driver = new FirefoxDriver(profile). Alternatively. On those browsers that don’t have native XPath support. FirefoxProfile profile = allProfiles. WebDriver driver = new FirefoxDriver(profile).3 Tweaking an existing Firefox profile Suppose that you wanted to modify the user agent string (as above). we expose the ability to use them. profile. profile. we have provided our own implementation.bar" . There are two ways to obtain this profile.setEnableNativeEvents(true).. 0 <input type= "text" name= "example" /> <INPUT type= "text" name= "other" /> The following number of matches will be found XPath expression //input //INPUT HtmlUnit Driver 1 (“example”) 0 Firefox Driver 2 2 Internet Explorer Driver 2 0 10. you need to add the following dependency to your pom.9.8.1 From a New Download Unpack the “webdriver-all. The support packages give you useful helper classes. This will give you the Chrome Driver.7376</version> </dependency> If you want to use the Firefox Driver. 10.xml: <dependency> <groupId>org.9. the “input” tag does not require the “type” attribute because it defaults to “text”. HtmlUnit Driver.7376</version> </dependency> If you want to use the Internet Explorer Driver. Selenium 2. Internet Explorer Driver.seleniumhq. 
and add all the JARs to your CLASSPATH.9 Getting and Using WebDriver 10.9. you need to add the following dependency to your pom.9.0 and WebDriver . 10.1 Matching Implicit Attributes Sometimes HTML elements do not need attributes to be explicitly declared because they will default to known values. such as the LiftStyleApi and the PageFactory. Remote Web Driver client and the support packages.webdriver</groupId> <artifactId>webdriver-ie</artifactId> 120 Chapter 10. Release 1.Selenium Documentation.2 With Maven If you want to use the HtmlUnit Driver.xml: <dependency> <groupId>org.xml: <dependency> <groupId>org.webdriver</groupId> <artifactId>webdriver-htmlunit</artifactId> <version>0.seleniumhq. The rule of thumb when using xpath in WebDriver is that you should not expect to be able to match against these implicit attributes. Firefox Driver.zip” you can download from the site.seleniumhq.webdriver</groupId> <artifactId>webdriver-firefox</artifactId> <version>0. For example. add the following dependency to your pom. 10.9.7376</version> </dependency> Finally. you should add the following dependency to your pom. Roadmap 121 . you need to add the following dependency to your pom. Release 1.0 <version>0.xml: <dependency> <groupId>org.7376</version> </dependency> 10.xml: <dependency> <groupId>org.seleniumhq. if you like to use any of our support classes.webdriver</groupId> <artifactId>webdriver-support</artifactId> <version>0.webdriver</groupId> <artifactId>webdriver-chrome</artifactId> <version>0.seleniumhq.10 Roadmap The roadmap for WebDriver is available here 10.9.11 Further Resources You can find further resources for WebDriver in WebDriver’s wiki Appendixes: 10.Selenium Documentation.9.7376</version> </dependency> If you want to use the Chrome Driver. Selenium 2.0 122 Chapter 10.0 and WebDriver .Selenium Documentation. Release 1. To Configure it with Visual do as Following.NET CLIENT DRIVER CONFIGURATION . 
• Launch Visual Studio and navigate to File > New > Project.NET client Driver can be used with Microsoft Visual Studio. • Select Visual C# > Class Library > Name your project > Click on OK button. 123 .CHAPTER ELEVEN . Selenium Documentation. Rename it as appropriate. .cs) is created. • Under right hand pane of Solution Explorer right click on References > Add References.0 • A Class (.NET client driver configuration . Release 1. 124 Chapter 11. Selenium.ThoughtWorks.dll.0 • Select following dll files . ThoughtWorks.Core.framework.UnitTests. nunit. Release 1.core.dll and click on Ok button 125 .dll.Selenium. ThoughtWorks.nmock.dll.dll. Selenium.Selenium Documentation.IntegrationTests.dll. nunit. 126 Chapter 11.0 With This Visual Studio is ready for Selenium Test Cases. .NET client driver configuration .Selenium Documentation. Release 1. (Europa Release). 127 . Following lines describes configuration of Selenium-RC with Eclipse . Perl. It is written primarily in Java and is used to develop applications in this language and. in other languages as well as C. • Select File > New > Other.3. It should not be too different for higher versions of Eclipse • Launch Eclipse.0. PHP and more.1 Configuring Selenium-RC With Eclipse Eclipse is a multi-language software development platform comprising an IDE and a plug-in system to extend it. Python. Cobol.Version: 3. by means of the various plug-ins. Release 1. Java Client Driver Configuration .0 • Java > Java Project > Next 128 Chapter 12.Selenium Documentation. 5 selected in this example) > click Next 12. Release 1.1. Configuring Selenium-RC With Eclipse 129 .0 • Provide Name to your project.Selenium Documentation. Select JDK in ‘Use a project Specific JRE’ option (JDK 1. (This described in detail in later part of document. Project specific libraries can be added here.) 130 Chapter 12.Selenium Documentation. Java Client Driver Configuration . Release 1.0 • Keep ‘JAVA Settings’ intact in next window. 1. 
12.0 • Click Finish > Click on Yes in Open Associated Perspective pop up window. Configuring Selenium-RC With Eclipse 131 .Selenium Documentation. Release 1. Selenium Documentation. 132 Chapter 12. Java Client Driver Configuration . Release 1.0 This would create Project Google in Package Explorer/Navigator pane. Selenium Documentation.1. Configuring Selenium-RC With Eclipse 133 . Release 1.0 • Right click on src folder and click on New > Folder 12. 0 Name this folder as com and click on Finish button. Release 1. Java Client Driver Configuration . • This should get com package insider src folder.Selenium Documentation. 134 Chapter 12. 1. Release 1.0 • Following the same steps create core folder inside com 12. Configuring Selenium-RC With Eclipse 135 .Selenium Documentation. Release 1.0 SelTestCase class can be kept inside core package. Please notice this is about the organization of project and it entirely depends on individual’s choice / organization’s standards. Create one more package inside src folder named testscripts.Selenium Documentation. Java Client Driver Configuration . 136 Chapter 12. Test scripts package can further be segregated depending upon the project requirements. This is a place holder for test scripts. selenium server etc) 12.1. Selenium client driver.Selenium Documentation.0 • Create a folder called lib inside project Google. This is a place holder for jar files to project (i. Release 1.e. Right click on Project name > New > Folder. Configuring Selenium-RC With Eclipse 137 . 138 Chapter 12.Selenium Documentation. Release 1.0 This would create lib folder in Project directory. Java Client Driver Configuration . Release 1.Selenium Documentation. Configuring Selenium-RC With Eclipse 139 .1.0 • Right click on lib folder > Build Path > Configure build Path 12. 0 • Under Library tab click on Add External Jars to navigate to directory where jar files are saved. Release 1. Select the jar files which are to be added and click on Open button. 
Java Client Driver Configuration .Selenium Documentation. 140 Chapter 12. 12.1. Release 1. Configuring Selenium-RC With Eclipse 141 .0 After having added jar files click on OK button.Selenium Documentation. 0 Added libraries would appear in Package Explorer as following: 142 Chapter 12.Selenium Documentation. Java Client Driver Configuration . Release 1. 2 Configuring Selenium-RC With Intellij IntelliJ IDEA is a commercial Java IDE by the company JetBrains. Configuring Selenium-RC With Intellij 143 . Release 1. 12.0 It should not be very different for higher version of intelliJ.Selenium Documentation. Apache Ant and JUnit.2. • Open a New Project in IntelliJ IDEA.0 12. Following lines describes configuration of Selenium-RC with IntelliJ 6. IntelliJ IDEA provides close integration with popular open source development tools such as CVS. Intellij provides a set of integrated refactoring tools that allow programmers to quickly redesign their code. Subversion. 144 Chapter 12. • Click Next and provide compiler output path.Selenium Documentation. Java Client Driver Configuration .0 • Provide name and location to Project. Release 1. • Click Next and select Single Module Project. 12.Selenium Documentation.2. Configuring Selenium-RC With Intellij 145 . Release 1.0 • Click Next and select the JDK to be used. Selenium Documentation. Java Client Driver Configuration . • Click Next and provide Module name and Module content root.0 • Click Next and select Java module. • Click Next and select Source directory. Release 1. 146 Chapter 12. • Click on Project Structure in Settings pan. Adding Libraries to Project: • Click on Settings button in the Project Tool bar. 12.0 • At last click Finish.2. This will launch the Project Pan. Release 1. Configuring Selenium-RC With Intellij 147 .Selenium Documentation. 148 Chapter 12. Release 1.0 • Select Module in Project Structure and browse to Dependencies tab.Selenium Documentation. Java Client Driver Configuration . ).Selenium Documentation. 
Configuring Selenium-RC With Intellij 149 . • Browse to the Selenium directory and select selenium-java-client-driver.0 • Click on Add button followed by click on Module Library.2. 12. Release 1.jar and seleniumserver.jar. (Multiple Jars can be selected b holding down the control key. Release 1. 150 Chapter 12.Selenium Documentation.0 • Select both jar files in project pan and click on Apply button. Java Client Driver Configuration . 2. Added jars would appear in project Library as following. Configuring Selenium-RC With Intellij 151 . 12.Selenium Documentation.0 • Now click ok on Project Structure followed by click on Close on Project Settings pan. Release 1. Release 1. 152 Chapter 12. Java Client Driver Configuration .0 • Create the directory structure in src folder as following.Selenium Documentation. 12.Selenium Documentation. Release 1.2. Configuring Selenium-RC With Intellij 153 . • Herein core contains the SelTestCase class which is used to create Selenium object and fire up the browser.0 Note: This is not hard and fast convention and might very from project to project. testscripts package contains the test classes which extend the SelTestCase class. Hence extended structure would look as following. Release 1.Selenium Documentation.0 154 Chapter 12. Java Client Driver Configuration . After following this.x. Download Active python’s installer from ActiveState’s official site: THIRTEEN PYTHON CLIENT DRIVER CONFIGURATION • Download Selenium-RC from the SeleniumHQ downloads page • Extract the file selenium. Run the installer downloaded (ActivePython-x. as in most linux distributions python is already pre-installed by default.mhtml 2.py • Run Selenium server from the console • Execute your test from a console or your Python IDE The following steps describe the basic installation procedure. the user can start using the desired IDE.x-win32-x86.com/Products/activepython/index. (even write tests in a text processor and run them from command line!) 
without any extra work (at least on the Selenium side).msi) 155 . • Add to your test’s path the file selenium. • Installing Python Note: This will cover python installation on Windows and Mac only. – Windows 1.py • Either write your Selenium test in Python or export a script from Selenium-IDE to a python file.x. Release 1. Python Client Driver Configuration . To install an extra Python.pythonmac.org/ (packages for Python 2.5.0 • Mac The latest Mac OS X version (Leopard at this time) comes with Python pre-installed.Selenium Documentation. 156 Chapter 13. get a universal binary at). 0 You will get a . 157 .Selenium Documentation. you’re done! Now any python script that you create can import selenium and start interacting with the browsers. You will find the module in the extracted folder. Download the last version of Selenium Remote Control from the downloads page 2. Congratulations. It contains a .py) in the folder C:/Python25/Lib (this will allow you to import it directly in any script you write).dmg file that you can mount. • Installing the Selenium driver client for python 1. Copy the module with the Selenium’s driver for Python (selenium. Extract the content of the downloaded zip file 3. it’s located inside seleniumpython-driver-client.pkg file that you can launch. Release 1. 0 158 Chapter 13.Selenium Documentation. Release 1. Python Client Driver Configuration . ’text-’)] 14.locate elements based on the text content of the node.CHAPTER FOURTEEN LOCATING TECHNIQUES 14.1.1 Useful XPATH patterns 14. if your dynamic ids have the format <input id="text-12345" /> where 12345 is a dynamic number you could use the following XPath: //input[starts-with(@id. Incidentally.2. To demonstrate. 14. • XPath: //div[contains(@class. the element <span class="top heading bold"> can be located based on the ‘heading’ class without having to couple it with the ‘top’ and ‘bold’ classes using the following XPath: //span[contains(@class.heading 14.1 text Not yet written . 
the contains function can be used. this would be much neater (and probably faster) using the CSS locator strategy css=span. which can make them difficult to locate. Useful for forms and tables.3 contains If an element can be located by a value that could be surrounded by other text.locate elements based on their siblings.1. however with CSS locators this is much simpler (and faster).4 siblings Not yet written .1 Locating elements based on class In order to locate an element based on associated class in XPath you must consider that the element could have multiple classes and defined in any order. ’heading’)]. For example. ’article-heading’)] 159 . 14. One simple solution is to use XPath functions and base the location on what you do know about the element.1.1.2 starts-with Many sites use dynamic values for element’s id attributes.2 Starting to use CSS instead of XPATH 14. Release 1.0 • CSS: css=div.Selenium Documentation.article-heading 160 Chapter 14. Locating Techniques . This action might not be possible to undo. Are you sure you want to continue? We've moved you to where you read on your other device. Get the full title to continue listening from where you left off, or restart the preview.
https://pt.scribd.com/doc/48864974/Selenium-Documentation
CC-MAIN-2016-30
refinedweb
33,579
52.15
If you have an interest in Mono, an open source, UNIX version of the Microsoft .NET development platform, then you should check out TheServerSide.Net's interview with Miguel De Icaza, the man who started the Mono project and GNOME. The interview is brief but still interesting as De Icaza discusses many of the new features in the coming Mono release. Mono has grown in popularity over the past few years with some companies using it for production applications. Now, with version 1.2 on the nearby horizon, the Mono team is working on a full-featured implementation of Windows Forms. Some of the components found in the 1.2 release include the entire C# language with Generics, all of the System.XML namespace, and about half of ADO.NET. Another feature that the Mono team is working on is an implementation of Ruby; something similar to what the Queensland University of Technology (QUT) currently has in the works. Besides discussing features that will be a part of Mono 1.2, De Icaza also talked about some of the things left out of the release. Probably the biggest cut was the inclusion of Visual Basic. "It will remain in an alpha state until contributors polish it up enough that it can be called a real compiler," De Icaza said. If you are looking for more reading, check out Linux.Ars' interview with Mono developer Todd Berman.
http://arstechnica.com/microsoft/news/2006/03/3406.ars
crawl-002
refinedweb
236
66.33
Piping is Method Chaining Want to share your content on R-bloggers? click here if you have a blog, or here if you don't.. Let’s work an example using Python‘s Pandas package (and classes). import pandas as pd data = [['alpha', 'a', 1, 0], ['beta', 'b', 2, 10], ['gamma', 'b', 3, 10]] df = pd.DataFrame(data, columns=['name', 'group', 'value', 'cost']) print(df) name group value cost 0 alpha a 1 0 1 beta b 2 10 2 gamma b 3 10 Method chaining is when methods return a reference to their host-object (or reference to a replacement for their host-object). This lets us call a sequence of methods one after the other as we show below. print(df.groupby("group").agg({"value":["max", "min"], "cost":["mean"]})) value cost max min mean group a 1 1 0 b 3 2 10 This may not be considered legible (especially as it was combined with print() function notation), so we use a common notation convention and insert a line-break before each method dispatch “ .“. The parenthesis surrounding the whole expression are a common Python convention to facilitate multi-line expressions. ( df .groupby("group") .agg({"value":["max", "min"], "cost":["mean"]}) .pipe(print) ) value cost max min mean group a 1 1 0 b 3 2 10 Or, to emphasize the similarity to pipes, we can use another convention (that contravenes the PEP8 style guide): end the lines with .\ which is the method dispatch “ .” symbol plus a line continuation mark. df .\ groupby("group") .\ agg({"value":["max", "min"], "cost":["mean"]}) .\ pipe(print) value cost max min mean group a 1 1 0 b 3 2 10 The above is just as with the Bizarro Pipe in R: the pipe is available as a convention over the existing language syntax. In Python (for method chaining enabled classes and methods) the glyph “ .” is in fact already a method application operator or pipe (as is the glyph “ .\EOL“, where EOL denotes the line-break or end of line). 
With method-chaining conventions the “ .” already is “a pipe” organizing method application form left to right without the need for illegible nesting. In R the glyph “ ->.;” is a function application operator or pipe (which we called the Bizarro Pipe; the Bizarro Pipe is a first-rate pipe, faster than other pipes, and interferes less with debugging than other pipes). Both languages have had this application capability for a very long time. We are using Pandas and pipe() as our example, but any package that whose methods return a reference to the object being worked on (or a reference to a replacement object) can be treated as a pipe-able object. If the class further implements one function re-director method (such as pipe()) then a lot more becomes practical. Here is another example showing how additional named and unnamed arguments can be handled. def add_delta_to_column(df, colname, delta): df[colname] = df[colname] + delta return df df .\ pipe(add_delta_to_column, "cost", 5) .\ groupby("group") .\ agg({"value":["max", "min"], "cost":["mean"]}) .\ pipe(print, "DEBUG1", sep = " | ") value cost max min mean group a 1 1 5 b 3 2 15 | DEBUG1 So depending on your point of view: “piping is poor-persons’s method chaining” or “method chaining is poor-persons’s piping” (taken from the usual quote comparing objects and closures). If one wants to go further, there are a number of Python packages adding additional significant piping capabilities (either through notation, operator overloading, or other methods). - sklearn.pipeline - Stack overflow notes 1 - Stack overflow notes 2 - dplython - sspipe - Tidyverse pipes in Pandas - chainlearn And that is piping versus method ch.
https://www.r-bloggers.com/2019/04/piping-is-method-chaining/
CC-MAIN-2021-17
refinedweb
602
61.77
autoload Autoloading symbols via source code grokking Want to see pretty graphs? Log in now!Want to see pretty graphs? Log in now! npm install autoload autoload -- Automatically load symbols in NodeJS INTRODUCTION In NodeJS, and CommonJS in general, it's difficult to depend on common globals to be defined when you need them. This has led to very chatty or verbose boilerplate in applications to grab handles to functions and objects in every module in a project. Generally module exports are used to define pseudo-classes or pseudo-namespaces which make sense as as globals. The problem is that even if your module defines its exports as global there's no way to be sure the module you depend on has been required or not. autoload attempts to ease this situation. INSTALLING npm install autoload GETTING STARTED In your top-level script, call autoload and registeredGlobalsAutoloader to initialize a typical autoloading environment: main.js: var autoload = require('autoload'); autoload.autoload(__dirname, [autoload.registeredGlobalsAutoloader(require)], function() { << your regular code goes here >> }); << don't put code here, as autoload is not ready yet! >> To register a global as autoloadable, in another module you would do: my-global-function.js: registerGlobal(function MyGlobalFunction() { << function here >> }); When you call autoload it will search __dirname for Javascript files and attempt to find all globals which could be defined (via registerGlobal). After it finds those symbols it registers autoloading getters on the global object (but does not actually require the module). When the getter is invoked the module is require'd and the symbol is returned. Be sure to look at the source code for registeredGlobalsAutoloader, as you can use this library to autoload symbols in scopes outside of global as well, or you can implement your own global exporting pattern too. For instance of you prefer global.MySymbol = ... or MySymbol = ... you could wire that up. 
I prefer having the global be super explicit which is why I made registerGlobal().
https://www.npmjs.org/package/autoload
CC-MAIN-2014-15
refinedweb
325
55.95
« Return to documentation listing #include <mpi.h> int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status) INCLUDE ’mpif.h’ MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR) <type> BUF(*) INTEGER COUNT, DATATYPE, SOURCE, TAG, COMM INTEGER STATUS(MPI_STATUS_SIZE), IERROR #include <mpi.h> void Comm::Recv(void* buf, int count, const Datatype& datatype, int source, int tag, Status& status) const void Comm::Recv(void* buf, int count, const Datatype& datatype, int source, int tag) const (comm) values match the source, tag, and comm values specified by the receive operation. The receive operation may specify a wildcard value for source and/or tag, indicating that any source and/or tag are acceptable. The wildcard value for source is source = MPI_ANY_SOURCE. The wildcard value for tag is tag = MPI_ANY_TAG. There is no wildcard value for comm. The scope of these wildcards is limited to the proceses in the group of the specified communicator. not recommended for a process to send messages to itself using the blocking send and receive operations described above, since this may lead to deadlock. See Section 3.5 of the MPI-1 Standard, "Semantics of Point-to-Point Communication." If your application does not need to examine the status field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the status_Irecv MPI_Probe « Return to documentation listing
http://www.open-mpi.org/doc/v1.4/man3/MPI_Recv.3.php
crawl-003
refinedweb
231
54.73
Apache log4net™ Frequently Asked Questions Information What is log4net?. Is log4net a reliable logging system?. What are the prerequisites for log4net? log4net runs on many different frameworks and each framework has its own requirements. As a rule of thumb you will need an ECMA-335 compliant CLI runtime, for example, the Microsoft® behavior categorizes logging into levels:. See the features overview document for more information on the features of log4net. What does log output look like? The log output can be customized in many ways. Moreover, one can completely override the output format by implementing one's own ILayout Here is an example output using PatternLayout with the conversion pattern %timestamp [%thread] %-5level %logger{2} %ndc - %message%newline. The first field is the number of milliseconds elapsed since the start of the program. The second field is the thread outputting the log statement. The third field is the level of the log statement. The fourth field is the rightmost two components of the name of the logger making the log request. The fifth field (just before the '-') is the nested diagnostic context (NDC). Note the nested diagnostic context may be empty as in the first two statements. The text after the '-' is the message of the statement. What are Loggers? The logger concept lies at the heart of log4net's configuration. Loggers are organized into a hierarchy and give the programmer run-time control on which logging statements are printed or not. Loggers are assigned levels through the configuration of log4net. A log statement is routed through to the appender depending on its level and its logger. code. You are free to do whatever you wish with your proprietary log4net extensions. In particular, you may choose to never release your extensions to the wider public. For details see the Apache License, Version 2.0..? What is the history of log4net? log4net is a port of the popular Apache log4j™ logging library. 
The initial port was done in June 2001, since then we have tried to remain in the spirit of the original log4j. See the log4net history page for more details. Where can I find the latest distribution of log4net? The log4net home page is a good place to start. Why are there two different strong name keys? Starting with log4net 1.2.11 there are two different binary distributions, oldkey and newkey. The oldkey distribution contains assemblies signed with the same strong name key that was used to sign the assemblies of log4net 1.2.10 and earlier. This strong name key is only available to log4net developers. The newkey distribution contains assemblies signed with the strong name key available from log4net's svn area or inside the source distribution. Everybody can create assemblies that have the same strong name. For open source projects it is important that you can create your own patched version of a product and use it instead of the official release. This is something that is now possible if the newkey is used throughout. The oldkey distribution is mostly only for people who work with third-party dependencies that require one of the earlier releases of log4net and can't be recompiled to use the new strong name. If you start a new project or can recompile all your dependencies we strongly recommend you use the newkey assemblies. release from one of the mirrors and verify the PGP signature. Configuration How can I change log behavior at runtime?. are the configurable options for an appender?. Is it possible to direct log output to different appenders by level?. Is there a way to get log4net to automatically reload a configuration file if it changes? Yes. The XmlConfigurator supports automatic reloading through the ConfigureAndWatch APIs. See the API documentation for more details. Can I load an appender from another assembly?. How do I get. 
How do I insert newlines into the layout header?> How do I use a pattern to set the value of a string property?> Implementing Logging. However, this is not the only way for naming loggers. A common alternative is to name loggers by functional areas. For example, the "database" logger, "remoting" logger, "security" logger, or the "XML" logger. You may choose to name loggers by functionality and subcategorize by locality, as in "DATABASE.MyApp.MyClass" or "DATABASE.MyApp.MyModule.MyOtherClass". You are totally free in choosing the names of your loggers. The log4net package merely allows you to manage your names in a hierarchy. However, it is your responsibility to define this hierarchy. Note: by naming loggers by locality one tends to name things by functionality, since in most cases the locality relates closely to functionality. How do I get the fully-qualified name of a class in a static block?(). Note: the two forms are only equivalent if Foo is not a generic class. For a generic class Foo<T> the variant using typeof generates a different logger for each different type parameter T while the variant using reflection generates the same logger for all Ts. What is the fastest way of (not) logging?. What is REALLY the FASTEST way of (not) logging?. Can the outputs of multiple client requests go to different log files?. Logger instances seem to be create-only. Why isn't there a method to remove logger instances? It is quite nontrivial to define the semantics of a "removed" logger which is still referenced by the user. How do I get multiple processes to log to the same file? Before you even start trying any of the alternatives provided, ask yourself whether you really need to have multiple processes log to the same file, then don't do it ;-). FileAppender offers pluggable locking models for this usecase but all existing implementations have issues and drawbacks. By default the FileAppender holds an exclusive write lock on the log file while it is logging.
This prevents other processes from writing to the file. This model is known to break down with (at least on some versions of) Mono on Linux and log files may get corrupted as soon as another process tries to access the log file. MinimalLock only acquires the write lock while a log is being written. This allows multiple processes to interleave writes to the same file, albeit with a considerable loss in performance. InterProcessLock doesn't lock the file at all but synchronizes using a system wide Mutex. This will only work if all processes cooperate (and use the same locking model). The acquisition and release of a Mutex for every log entry to be written will result in a loss of performance, but the Mutex is preferable to the use of MinimalLock. If you use RollingFileAppender things become even worse as several process may try to start rolling the log file concurrently. RollingFileAppender completely ignores the locking model when rolling files, rolling files is simply not compatible with this scenario. A better alternative is to have your processes log to RemotingAppenders. Using the RemoteLoggingServerPlugin (or IRemoteLoggingSink) a process can receive all the events and log them to a single log file. One of the examples shows how to use the RemoteLoggingServerPlugin. If I have many processes across multiple hosts (possibly across multiple time zones) logging to the same file using the RemotingAppender, what happens to timestamps?. When should I log my first message?. Customization Can the log output format be customized?. Can I write a custom appender?. Troubleshooting How do I enable log4net internal debugging? There are 2 different ways to enable internal debugging in log4net. These are listed below. The preferred method is to specify the log4net.Internal.Debug option in the application's config file. 
Internal debugging can also be enabled by setting a value in the application's configuration file (not the log4net configuration file, unless the log4net config data is embedded in the application's config file). The log4net.Internal.Debug application setting must be set to the value true. For example: <?xml version="1.0" encoding="utf-8" ?> <configuration> <appSettings> <add key="log4net.Internal.Debug" value="true"/> </appSettings> </configuration> This setting is read immediately on startup and will cause all internal debugging messages to be emitted. To enable log4net's internal debug programmatically you need to set the log4net.Util.LogLog.InternalDebugging property to true. Obviously the sooner this is set the more debug will be produced. How can I evaluate configuration errors at runtime? To prevent silent failure of log4net as reported as LOG4NET-342, log4net supports a way to evaluate if it was configured and also to evaluate messages generated on startup since 1.2.11. To check if log4net was started and configured properly one can check the property log4net.Repository.ILoggerRepository.Configured and enumerate the configuration messages as follows: if(!log4net.LogManager.GetRepository().Configured) { // log4net not configured foreach(log4net.Util.LogLog message in log4net.LogManager.GetRepository().ConfigurationMessages.Cast<log4net.Util.LogLog>()) { // evaluate configuration message } } Why doesn't the EventLogAppender work?: Make the ASPNET user a member of the Administrators group. This will work because the user will then have the required permissions. This is not recommended for production use. As the event source only needs to be created once for the machine, create an installer and configure it to create the event source. The installer will need to be run as Administrator (don't they all). See System.Diagnostics.EventLogInstaller in the Microsoft .NET Framework SDK for an example of how to create a simple event log installer.
There is a Microsoft Knowledge Base article that covers this issue and how to resolve it. PRB: "Requested Registry Access Is Not Allowed" Error Message When ASP.NET Application Tries to Write New EventSource in the EventLog. Why can't I log to a FileAppender from a web application?. Why doesn't the logging in my service work?. Why does my ASP.NET web application stop logging when deployed on an IIS? This problem has been reported by several people as issue LOG4NET-178. The issue seems to be caused by a broken LOG4NET configuration or a timing problem caused by an application shutdown event that floats in late after an application start event and thus LOG4NET stops logging immediately after it has been started. The first step to troubleshoot problems is enabling the log4net internal debugging features as described here and fix all errors that pop up. If the problem still persists,. I am having trouble using the AdoNetAppender to connect to my database?. How do I report bugs? First make sure it really is a bug and not a usage error. When in doubt, ask on the log4net-user mailing list first. If you have identified a bug, please report it via our Issue Tracker. You may want to check it hasn't been reported before by searching the existing issues. log4net doesn't log when built in RELEASE mode If you use attributes to configure log4net then the order by which assemblies are loaded may determine whether your attributes are used or not. Assembly load order may be different in DEBUG and RELEASE mode. As stated in the manual the attribute will only be read for the first assembly that tries to use log4net. So it is important that you obtain your ILog instance as early as possible. For a command line application "as early as possible" probably is the class holding the Main method, for a Web-Application it would be your Global.asax class and for a Windows Service it would be the class deriving from ServiceBase.
log4net doesn't log at all You may have overlooked initialization code for log4net in your application. log4net can be initialized explicitly by calling one of the configurators (e.g. BasicConfigurator or XmlConfigurator in the log4net.Config namespace), or implicitly by including the [XmlConfiguratorAttribute] in the assembly where log4net is first used. See the manual for more information. If you use attributes to configure log4net then the order by which assemblies are loaded may determine whether your attributes are used or not. Assembly load order may be different in DEBUG and RELEASE mode. See also log4net doesn't log when built in RELEASE mode. The ADO.NET Appender doesn't reconnect after a DB failure on .NET 4.5.1 Starting with .NET 4.5.1 ADO.NET has added connection resiliency which is supposed to re-establish the connection as part of the framework. As a result log4net doesn't know the connection is broken and will never attempt to re-establish the connection. Unfortunately re-connecting doesn't seem to be working reliably. A workaround may be to add ConnectRetryCount=0 to your connection string. For details see LOG4NET-442 Miscellaneous How do I make log4net appear in the Visual Studio Add References dialog? There is a good discussion of this topic on Robert McLaws blog: Building a Better Server Control Experience, Part 2. Do you provide a Nuget package? No, the log4net project doesn't provide a Nuget package of its own, but Jiří Činčura has kindly created a Nuget package he develops at. Please report any issues with the packaging at his github issue tracker.
https://logging.apache.org/log4net/release/faq.html
CC-MAIN-2016-44
refinedweb
2,185
58.18
Application Developer's Guide This chapter describes how to use the Liquid Data control in WebLogic Workshop to develop applications that use data from Liquid Data queries. Applications can use the data to display results in a web application, to use in a Web Service, to use as an input to a WebLogic Integration business process, or in many other ways. The following topics are included: WebLogic Workshop allows you to create Liquid Data Controls. The Liquid Data Control is a Java Control, and it allows you to very rapidly generate robust applications that use results from Liquid Data queries (for example, to display in a web application or to use in a WebLogic Integration business process). This section describes the Liquid Data control and the applications you can create with it. The Liquid Data Control is available in WebLogic Workshop. The Liquid Data Control is an extensible Java Control that accesses the Liquid Data server to execute queries from applications developed in WebLogic Workshop. The Liquid Data Control is available with all of the other Java Controls in WebLogic Workshop (for example, the database control). When you use the Liquid Data Control in WebLogic Workshop, Workshop displays a query wizard which connects to a Liquid Data server to get the query metadata needed for configuring the control. After you select the queries to use in your Liquid Data Control, Workshop generates XMLBean classes for the target schemas associated with the queries and then generates a Liquid Data Control ( .jcx) file. When you create a Liquid Data control in WebLogic Workshop, the Liquid Data Control wizard generates XMLBean classes for each query in the control. The Liquid Data Control wizard uses the schema associated with the stored query in the Liquid Data repository to generate the structure for the XMLBean classes. The XMLBean classes provide Java methods to traverse the XML result set returned from Liquid Data. 
The XMLBean classes are automatically generated in a schema project in the workshop application. There is one schema project per Liquid Data Control ( .jcx) file. You can use the Liquid Data Control like other controls in WebLogic Workshop, and you can take advantage of Workshop features to use Liquid Data Controls in Web Services, Page Flows and WebLogic Integration business processes. For example, you can generate a page flow from your Liquid Data control and then use the XMLBeans to bind the data returned from Liquid Data to the JSPs in your application. When you create a Liquid Data Control, WebLogic Workshop generates a Java Control Extension ( .jcx) file. The file contains methods corresponding to the queries which the control accesses, shows the schema files of each query as a comment, and contains a commented method which, when uncommented, allows you to pass any XQuery statement to execute an ad-hoc query. This section describes the Liquid Data Control ( .jcx) file and includes the following sections: The design view of the Liquid Data Control ( .jcx) file shows the available methods in a graphical view. Figure 2-1 Design View of a Control File With the right-click menu, you can add, modify (for example, change the query accessed by a method), rename, and delete methods. The right-click menu is context-sensitive; it displays different items if the mouse cursor is over a method, or in the control portion of the design pane. The source view shows the source code of the Java Control Extension ( .jcx) file. It includes as comments the schema used to generate the XMLBean classes for each query. The signature for each method shows the return type of the method. The return type is the XMLBean class which was generated for the schemas. This file is a generated file and the only time you should need to edit the source code is if you want to add a method to run an ad-hoc query, as described in Running Ad-Hoc Queries through the Liquid Data Control.
The following shows the source code for a generated Liquid Data Control ( .jcx) file. It shows the package declaration, import statements, connection properties, the schema project and filename used with the ApplOrderDetailView query, and the method that executes the ApplOrderDetailView query. package myFolder; import weblogic.jws.control.*; import com.bea.ld.control.LDControl; /** * @jc:LiquidData urlKey="myApp.myAppWeb.myFolder.anotherLDControl" */ public interface anotherLDControl extends LDControl, com.bea.control.ControlExtension { /* Generated methods corresponding to stored queries. */ /** * ); */ } The XMLBean classes corresponding to the queries in the Liquid Data Control ( .jcx) file are generated in a schema project. There is one schema project for each control. The schema project(s) also contain a copy of the schema files associated with the queries in the Liquid Data Control ( .jcx) file. The JAR file for the XMLBean classes is generated in the Libraries directory of your WebLogic Workshop application. The @return Schema Project section of the generated Liquid Data Control ( .jcx) file displays the location of the schemas corresponding to the query method below this section in the control file. For example, the following code snippet from a generated Liquid Data Control ( .jcx) file shows the name of the schema project and the name of the target schema. /** * ); The name of the schema project is myAppWeb-myFolder-anotherLDControl-Schemas, and the schema file is in the rtl subdirectory and is named OrderDetailView.xsd. Figure 2-2 shows the generated schema project in WebLogic Workshop. Figure 2-2 Generated Schema Project in WebLogic Workshop At the bottom of the generated Liquid Data Control ( .jcx) file is a comment showing methods you can add which allow you to run an ad-hoc query through the control. To add one of these methods, uncomment the appropriate method and add a return type to the signature. 
/** *); */ You can create Liquid Data controls in a variety of WebLogic Workshop projects. This section includes the following procedures to create Liquid Data controls: The steps are similar for creating Liquid Data controls in other types of WebLogic Workshop projects. This section describes the general steps for creating a Liquid Data control. For detailed steps for creating a Liquid Data control in a Web Project or in a Web Service project, see To Create a Liquid Data Control in a Web Project or To Create a Liquid Data Control in a Web Service Project. Before you can create a Liquid Data control in WebLogic Workshop, you must create an application and create a project in the application. You can create a Liquid Data control in most types of Workshop projects, but the most common projects in which you create Liquid Data controls are Web Projects, Web Service Projects, Portal Web Projects, or a Process Web Projects. Make sure the Liquid Data server is running. The Liquid Data server can be running locally (on the same domain as WebLogic Workshop) or remote (on a different domain from workshop). If the Liquid Data server is not running, start up the domain in which it runs. Create a folder in the project to hold the Liquid Data control(s). You can also create other controls (database controls, for example) in the same folder, if needed. Workshop controls cannot be created at the top-level of a project directory structure; they must be created in a folder. When you create the folder, enter a name that makes sense for your application. Figure 2-3 Create a New Folder in WorkshopLiquid Data The Liquid Data Control is a Java Control Extension. To create a Liquid Data Control, start the Java Control wizard by selecting your folder within a project, right-clicking, and selecting New —> Java Control, as shown in Figure 2-4. You can also create a control using the File —> New—> Java Control menu item. 
Figure 2-4 Create a New Liquid Data Control Then select Liquid Data from the New Java Control Extension dialog, as shown in Figure 2-5. Enter a filename for the control ( .jcx) file and click Next. Figure 2-5 Liquid Data Control in WebLogic Workshop Note: The LiquidDataControl.jar file is copied into the Libraries directory (if it does not already exist) of your application when you create a Liquid Data Control. A screen similar to the one in Figure 2-6 allows you to enter connection information to your Liquid Data server. If the server is local, the Liquid Data control uses the connection information stored in the application properties (to view these settings, access the Tools —> Application Properties menu item in the IDE). If the Liquid Data server is remote, click the Remote button and fill in the appropriate server URL, user name, and password. Note: You can specify a different username and password with which you connect to a local machine on the Liquid Data Control Wizard Connection Information dialog, too. To do this, click the Remote button and enter the connection information (with a different username and password) for your local machine. The security credentials specified through the Application Properties or through the Liquid Data Control Wizard are only used for creating the Liquid Data Control ( .jcx) file, not for testing queries through the control. For more details, see Security Considerations With Liquid Data Controls. When the information is correct, click Create to go to the next step. Figure 2-6 Liquid Data Control Wizard—Connection Information In the Select Liquid Data Queries screen, select queries from the left pane and click Add to add those queries to the control. If you mouse over a query, the signature of the control method for the query appears in a tooltip popup. A "fetching metadata" message appears if the signature has not yet been retrieved from the Liquid Data server. 
Note: Only stored queries with a schema configured appear in the Stored Queries list. For details on configuring stored queries, see Configuring Stored Queries in the Administration Guide. You can also deploy stored queries from directly from the Data View Builder, as described on Deploying a Query in Building Queries and Data Views. Select one or more queries, add them to the right pane, and click Finish. When you click Finish, the Liquid Data Control ( .jcx) file is generated and XMLBean classes corresponding to the schema for each stored query in the control are generated. The XMLBeans are stored in the Libraries directory of the Workshop Application. In the Libraries directory, there is one JAR file for each Liquid Data control, with the XMLBeans included in the JAR file. The JAR files are named according to the project and directory hierarchy for the control ( .jcx) file. Figure 2-7 Liquid Data Control Wizard—Select Queries Note: The stored queries should be named according to the Naming Conventions for Stored Queries described in Building Queries and Data Views. If a stored query contains illegal characters (for example, a hyphen), the method generated in the Liquid Data Control ( .jcx) file might be an invalid Java name, causing compilation errors. If a method name is invalid, you can change the name to make it valid. Clicking the New Query button launches the Data View Builder. You can then use the Data View Builder to create, modify, test, and deploy new queries. The Refresh button updates the stored query list from the Liquid Data server. If you create and deploy a new query with the Data View Builder, click the Refresh button to display the new query in the wizard. This section describes the basic steps for creating a Liquid Data control in a new Web Project. If you are adding the control to an existing project, you might not need to perform each step (for example, creating a new project). 
Perform the following steps to create a Liquid Data control in a new WebLogic Workshop Web Project. Workshop generates the .jcx Java Control Extension file for your Liquid Data control. Each method in the .jcx file returns an XMLBean type corresponding to the stored query schema. The XMLBean classes for each query are automatically generated when you create the Liquid Data control. The XMLBean classes are stored in the Libraries directory of the Workshop Application. This section describes the basic steps for creating a Liquid Data control in a new Web Service. If you are adding the control to an existing Web Service, you might not need to perform each step (for example, creating a new project). Perform the following steps to create a Liquid Data control in a new WebLogic Workshop Web Service Project. Workshop generates the .jcx Java Control Extension file for your Liquid Data control. Each method in the .jcx file returns an XMLBean of the type corresponding to the schema from the stored query. The XMLBean for each query is automatically generated when you create the Liquid Data control. The XMLBeans are stored in the Libraries directory of the Workshop Application. Perform the following steps to add a Liquid Data Control to an existing Web Service .jws file. Figure 2-8 Add a Liquid Data Control to Web Service Figure 2-9 Insert Control Wizard Perform the following steps to generate and test a web service from a Liquid Data Control. Workshop generates the .jws Java Web Service file for your Liquid Data control. Workshop builds an asynchronous Web Service from the .jws file. startTestDriveand finishTestDrivemethods, as well as a method for each of the queries you specified in the Liquid Data Control wizard. The Web Service executes the query and the results are returned to the test browser. This section describes the ways you can modify an existing Liquid Data control. 
It contains the following procedures: Perform the following steps to change the query that a method in a Liquid Data Control accesses. Figure 2-10 Changing the Query a Method Accesses Note: You can also access the Liquid Data Control Wizard from the property editor Figure 2-11 Opening the Property Editor from the Stored-Query Name Property Perform the following steps to add a new method to an existing Liquid Data control. Figure 2-12 Add a Method to a Control Alternately, you can launch the query wizard from the Stored-Query name property, as shown in Figure 2-11. You can use the query wizard to modify one or more queries accessed in an existing Liquid Data Control. A query corresponds to a method in the Liquid Data Control ( .jcx) file. Perform the following to invoke the Liquid Data query wizard and modify the query selection for an existing Liquid Data Control. urlor usernameattributes in the property editor. Figure 2-13 Invoking the Query Wizard from the Workshop Property Editor urlor usernameattributes, or if the Liquid Data Server is in a remote domain, the Liquid Data Connection Information screen appears. Enter a password and click OK. If you did not change the value of these attributes, then the wizard opens to the Property Editor where you select queries. If any of the schemas corresponding to any methods in a Liquid Data Control change, then you must update the Liquid Data Control to regenerate the XMLBeans for the changed schemas. Perform the following steps to update a Liquid Data Control WebLogic Workshop includes NetUI, which allows you to rapidly assemble applications that display data returned from Liquid Data queries. When you create a Liquid Data control, XMLBean classes are generated for the target schema of each stored query included as a method in the control. The following sections represent the basic steps for using NetUI to display results from a Liquid Data Control: You can generate a page flow from a Liquid Data Control ( .jcx) file. 
When you generate the page flow, Workshop creates the page flow, a start page ( index.jsp), and a JSP file for each method you specify in the Page Flow wizard. Perform the following steps to generate a page flow from a Liquid Data control. .jcx) file from the application file browser, right-click, and select Generate Page Flow. Figure 2-14 Enter a Name for the Page Flow Figure 2-15 Choose Liquid Data Methods for the Page Flow Workshop generates the .jpf Java Page Flow file, a start page ( index.jsp), and a JSP file for each method you specify in the Page Flow wizard. .jpffile for the XMLBeans. For details, see Adding XMLBean Variables to the Page Flow. You can add a Liquid Data Control to an existing Page Flow .jpf file. The procedure is the same as adding a Liquid Data Control to a Web Service, described in To Add a Liquid Data Control to an Existing Web Service File, except instead of opening the Web Service in Design View, you open the Page Flow .jpf file in Action View. You can also add a control to an existing page flow from the Page Flow Data Palette (available in Flow View and Action View of a Page Flow), as shown in Figure 2-16. Figure 2-16 Adding a Control to a Page Flow from the Data Palette In order to use the NetUI features to drag and drop data from an XMLBean into a JSP, you must first create one or more variables in the page flow .jpf file. The variables must be of the XMLBean type corresponding to the schema associated with the query. If you create a single variable at the top level of the XMLBean class (the same as the return type of the method in the Liquid Data Control ( .jcx) file), the NetUI repeater wizard can then access all the data from the query. Defining a single variable in the page flow .jpf file for the top-level class of the XMLBean (the same as the return type of the method in the Liquid Data Control ( .jcx) file) provides you access to all the data from the query (through the NetUI repeater wizard). 
When you create the Liquid Data control and the XMLBeans are generated, the XMLBean generation defines an array for each element in the schema that is repeatable. You might want to add other variables corresponding to other arrays in the XMLBean classes to make it more convenient to drag and drop data onto a JSP, but it is not required. Define each variable with a type corresponding to the XMLBean object of the parent node. Define the variables in the class that extend the PageFlowController class. For example, consider the case where you are trying to display XML data of the following form: <CUSTOMER>data</CUSTOMER> ......<PROMOTION>promotion data</PROMOTION> ...... You can add the following code snippet, which shows two variables (shown in bold type) added to the page flow: public class myPageFlowController extends PageFlowController { /** * This is the control used to generate this pageflow * @common:control */ private aLDControl myControl; // Add public Variables with XMLBeam types from the generated XMLBeans. // The type matches the return type of the method corresponding to the // query in the Liquid Data Control (.jcx) file. public com.mycorp.crm.CUSTDocument aVar; This code snippet declares one variable in the page flow, aVar, and the variable will display in the IDE to allow for drag-and-drop operations onto JSP files. Note: The variables of XMLBean type in the page flow must be declared public. You must also initialize the variable in the page flow method corresponding to the page flow action that calls the query. For details, see To Initialize the Variable in the Page Flow. Figure 2-17 Page Flow Variables for XMLBean Objects When you drag-and-drop an array onto a JSP file, the NetUI Repeater Wizard appears and guides you through selecting the data you want to display. Perform the following steps to add a variable of XMLBean type for your query. XMLBeantype corresponding to the schema elements you want to display. 
Depending on your schema, what you want to display, and how many queries you are using, you might need to add several variables. XMLBean. Create a variable of that type. For example, if the signature for a control method is as follows: mySchema.CUSTOMERPROFILEDocument myQuery(java.lang.String custid); create a variable as follows: public mySchema.CUSTOMERPROFILEDocument myCustomerVar; You must initialize your XMLBean variables in the Page Flow. Initializing the variables ensures that the data bindings to the variables work correctly and that there are no tag exceptions when the JSP displays the results the first time. Perform the following steps to initialize the XMLBean variables in the Page Flow: The following sample code shows an example of initializing a variable on the Page Flow. The code (and comments) in bold is what was added. The rest of the code was generated when the Page Flow was generated from the Liquid Data control (see Generating a Page Flow From a Control). /** * Action encapsulating the control method :RTLCustomerProfile * @jpf:action * @jpf:forward name="success" path="index.jsp" * @jpf:catch method="exceptionHandler" type="Exception" */ public Forward RTLCustomerProfile( RTLCustomerProfileForm aForm ) throws Exception { schemasBeaComLdCustview.PROFILEVIEWDocument var = myControl.RTLCustomerProfile( aForm.custid ); getRequest().setAttribute( "results", var ); //initialize the profile variable to var from the above statement profile=var; return new Forward( "success" ); } Once you create and initialize your variables in the Page Flow, you can drag and drop the variables onto a JSP file. When you drag and drop an XMLBean variable onto a JSP File, Workshop displays the repeater wizard to guide you through the process of selecting the data you want to display. The repeater wizard provides choices for displaying the results in an HTML table or in a list. 
Perform the following to add a NetUI repeater tag (used to display the data from a Liquid Data query) to a JSP file. Note: You can only drag and drop leaf nodes from the Page Flow Properties. Figure 2-18 Repeater Wizard Figure 2-19 Repeater Wizard Select Format Screen Workshop generates the layout for your data. You can create repeater tags inside of other repeater tags. You can display nested repeaters on the same page (in nested tables, for example) or you can set up Page Flow actions to display the nested level on another page (with a link, for example). Perform the following steps to create a nested repeater tag. Perform the following steps to add code in your JSP file to handle null values for your data. It is a common JSP design pattern to add conditional code to handle null values. If you do not handle null values, your page will display tag errors if it is rendered before the queries on it are executed. . defaultTextattribute does not work for your netui-data:repeatertag, add code before and after the tag to test for null values. The following is sample code. The code in bold is added, the rest is generated by the repeater wizard. This code uses the profile variable initialized in To Initialize the Variable in the Page Flow. <%> <% }%> This section describes security considerations to be aware of when developing applications using Liquid Data controls. The following sections are included: The WebLogic Workshop Application Properties (Tools —> Application Properties) allow you to set the connection information to connect to the domain in which you are running. You can either use the connection information specified in the domain boot.properties file or override that information with a specified username and password. When you create a Liquid Data Control ( .jcx) file and are connecting to a local Liquid Data server (Liquid Data on the same domain as Workshop), the user specified in the Application Properties is used to connect to the Liquid Data server. 
When you create a Liquid Data Control and are connecting to a remote Liquid Data server (Liquid Data on a different domain from Workshop), you specify the connection information in the Liquid Data Control Wizard Connection information dialog (see Figure 2-6). When you create a Liquid Data Control, the Control Wizard displays all queries to which the specified user has access privileges. The access privileges are defined by any security policies set on the queries, either directly or indirectly. Note: The security credentials specified through the Application Properties or through the Liquid Data Control Wizard are only used for creating the Liquid Data Control ( .jcx) file, not for testing queries through the control. To test a query through the control, you must get the user credentials either through the application (from a login page, for example) or by using the run-as property in the Web Service file. For testing, you can use the run-as property to test a control running as a specified user. To set the run-as property in a Web Service, open the Web Service and enter a user for the run-as property in the WebLogic Workshop property editor. Queries run through a Liquid Data Control used by the Web Service). Note: The Liquid Data Control property editor shows a run-as property, but the run-as property in the Liquid Data Control does not cause the Liquid Data Control to run as the specified user. If you want to use this feature, you must specify the run-as property in the .jws file, not in the .jcx file. If the Liquid Data server is on different domain from WebLogic Workshop, then both domains must be set up as trusted domains. Domains are considered trusted domains if they share the same security credentials. With trusted domains, a user that exists on one domain need not be authenticated on the other domain (as long as the user exists on both domains). 
Note: After configuring domains as trusted, you must restart the domains before the trusted configuration takes effect. Perform the following steps to configure domains as a trusted: Figure 2-20 Setting up Trusted Domains For more details on WebLogic security, see Configuring Security for a WebLogic Domain in the WebLogic Server documentation. When you move any Liquid Data deployment from development to production, you must move Liquid Data and WebLogic Server resources (JDBC Connection Pools, Liquid Data Data Sources, the Liquid Data repository, and so on) from the development environment to the production environment. For details about deploying Liquid Data, see the Liquid Data Deployment Guide. For applications that use Liquid Data controls, you must also deploy and update the ldcontrol.properties file, which contains connection information for Liquid Data controls. This section describes the development to production lifecycle and provides the basic steps for moving an application containing Liquid Data controls from development to production. The following sections are included: In a typical development scenario, you will develop your applications in one environment and then deploy them in another. There are two main artifacts that you need to deploy on the production environment: Figure 2-21 Development to Production Lifecycle After you have developed and tested your application using WebLogic Workshop in your development environment, you must create a .ear file for deployment to your production server(s). All the resources the application needs are already included in the application Libraries directory, so the only thing you need to do is create the .ear file for the application. Perform the following steps to generate an enterprise archive file (.ear) in WebLogic Workshop: When the build is complete, WebLogic Workshop lists the .ear file location in the Build window. 
Each domain that runs Liquid Data Control applications has a single ldcontrol.properties file, which stores the connection information for all Liquid Data Control applications running in the domain. The ldcontrol.properties file is located at the root directory of your domain where the Liquid Data Control application .ear file is deployed. There is an entry in the ldcontrol.properties file for each control you have created in each application. The entries in the ldcontrol.properties file are of the following form: AppName.ProjectName.FolderName.jcxName=t3\://hostname\:port Note: The colons ( :) in the URL must be escaped with a backslash ( \) character. If the URL value is missing, the Liquid Data Control uses the connection information from the domain config.xml file. The following is a sample ldcontrol.properties file. #Fri Oct 31 15:30:36 PST 2003 myTest.myTestWeb.myFolder.Untitled=t3\:myLDServer\:7001 myTest.myTestWeb.myFolder.myControl= SampleApp.LiquidDataSampleApp.Controls.RTLControl=t3\:myLDServer\:7001 SampleApp.Untitled.NewFolder.Untitled=t3\:yourLDServer\:7001 testnew.Untitled.NewFolder.ldc= test.testWeb.NewFolder.Untitled= This section describes the following basic steps for moving an application from development to production: Use WebLogic Workshop to generate the .ear file for your application as described in To Generate the .ear File in Workshop. Merge the entries in the ldcontrol.properties file from the root level of your development domain with the ldcontrol.properties file in the root level of the production domain. There must be one entry for each Liquid Data Control. If the ldcontrol.properties file does not exist in the production domain, copy it from your development domain. You must also update the URLs in each entry of the file to reference the production Liquid Data servers. For details on and for the syntax of the ldcontrol.properties file, see Liquid Data ldcontrol.properties File. 
Deploy your enterprise archive ( .ear) file on the production WebLogic Server. You deploy the .ear file from the domain —> Deployments —> Applications node of the WebLogic Server Administration Console. The .ear file must be accessible from the filesystem in which the WebLogic administration server is running. For details on deploying .ear files, see Deploying WebLogic Server Applications from the WebLogic Server documentation.
http://e-docs.bea.com/liquiddata/docs81/program/ld_control.html
crawl-002
refinedweb
4,882
53.61
In the previous part of this series, we started with the basics for kicking off a new Django project. We prepared our virtualenv, installed needed modules to it, created and integrated a new Heroku – project for it and learned how to work with variables in Heroku to control our application with an easy example. We also learned how to check our results locally before we publish it to our production space and how we can add an addon to our Heroku project by adding a PostgreSQL database to it. Today, we will learn what an “app” is in Django and how to create it. Also, we will learn about and create a so-called URLconf / routing to direct specific URLs to specific parts of our code. Project, App, Application, … what is all this? 🤯 In Django, there are a few terms, which are a bit confusing at first. The fact that careless people tend to use them as exchangeable terms sometimes(including myself, like I did in the previous part by writing “Creating the Django app” even though we created a project 😅), confuses beginners even more. So: Let’s begin today’s article with a short definition of these terms: A project is what we have created using the django-admin startproject dtbot . in theprevious part. It is the absolute lowest entry level of the Django structure. For me, it helps the best thinking about what my personal definition of the term “Project” is without thinking about Django, but the term in general. It may be a collection of pieces, which form the stack build for a specific customer, maybe. For each completely new thing, like a different website or a different customer, you are creating an own project for, usually. That’s exactly what it describes in the Django-World, too. The tutorial describes a project like this: (A Django project is) a collection of settings for an instance of Django, including database configuration, Django-specific options and application-specific settings. The terms app and application are harder to distinguish. 
To make it even more confusing, an “app” is also called a “package“, sometimes. These two terms mean the same thing absolutely and are 100% interchangeable. If any, most people tend to use the term “app” as the thing that lives in your project tree and “package”, when they are talking about a packaged distribution of that code for shipping or download. But generally, these are describing the same thing. The term “application” does not exist in the Django world as an own term, really. In my experience, people tend to talk about “the application” if “the project” is what they meant in the first place. Another thing which is confusing is the fact that “app” and “application” are not really two different words; “app” is just an abbreviation of the term “application” in the end. Anyways, they are used differently. An “app” is something which is really doing something active in your project instead of defining settings, routes or basics for anything to base upon, like generating a page, receiving data, applying logic on that requests, and so on. Again, this is how the Django Tutorial describes the difference between a project and an app: A project is a collection of configuration and apps for a particular website. A project can contain multiple apps. An app can be in multiple projects. Why multiple apps? If you are not too familiar with this concept, it might look like creating several apps in one project is making everything more complicated than it is necessary. In the end, you might complain, you are about to create just such a tiny little application which seems like not being worth the efforts to structure everything in such a complicated way. The last sentence of the previous quote gives an idea for this already: Reusability is a strong reason. Maybe you want to share the result of your development sooner or later. 
Or, after having finished this one bot, you might have a great idea for another one, dealing with your shopping list in some way instead of your household-budget. Or maybe these two should even interact with each other later? Or the second bot you create simply does not need huge parts of the functionality of your first bot – why should you carry around “dead” code which makes your bot’s code just more complicated and blown up? Wouldn’t it be great if you had created separated apps for things like: - User registration - Calculations and reporting - Analyzing the message and creating the replies - … The point is: You can’t know at the beginning how your project evolves over time or what additional ideas you might have. Separating functionality into apps makes it appear a bit overcomplicated, but as soon as you have made your first steps with this, it won’t feel like being complicated anymore. Just stick to it and stay tuned until we have created our app in a minute! If you want to learn more about apps like: “When does it make sense to separate some functionality to separate apps?” I recommend reading this article about it: 0–100 in Django: Starting an app the right way Creating the app A new application is created, using a command from the manage.py script in the root of your project-dir: … that’s not too exciting, is it? To see what this command has changed in our project, I like to use Git to display all differences: bot/ nothing added to commit but untracked files present (use “git add” to track) (dtbot-hT9CNosh) ~/dtbot $ git add . (dtbot-hT9CNosh) ~/dtbot $ git status On branch master Changes to be committed: (use “git reset HEAD <file>…” to unstage) new file: bot/__init__.py new file: bot/admin.py new file: bot/apps.py new file: bot/migrations/__init__.py new file: bot/models.py new file: bot/tests.py new file: bot/views.py (dtbot-hT9CNosh) ~/dtbot $ Seems as if it has created just one additional folder without touching any of our other files.
Fine – this way, it isn’t messing things up. Right now, this is of absolutely no use for us; we need to do some things before we can really start to build something in that app: Writing a view 🧐 … a what? What’s that? A view is … more or less: Code. A function, to be even more precise, which decides what happens to a request that hits your app. The easiest view I can possibly think of is a static reply to any request. In other words: Answering any request with the same, static page. Before I overcomplicate this with my explanations, let’s head for an example: Views are created in the file views.py of the app. So, let’s edit the file bot/views.py . After the app was created, the file has some default content: # Create your views here. Just remove that and replace it with the following: def index(request): return HttpResponse(“Hello, world. This is the bot app.”) Do not think about “How should I know this is what I need to import and use for a simple HTTP response???” too hard for now – just accept it. This is something that comes with the time; in the beginning, you need to read a lot of docs and lookup examples for nearly every baby-step. That’s normal, you are not stupid or so: Everyone needs to get used to this, first! Apart from that, it’s pretty obvious what happens here, isn’t it? First, a module is imported, which seems to generate a response, which can be served by the web server to the client then. A function named index is defined, taking one argument named request . request is not used in that function; so: Not too important what that is. It’s needed in the API of the function anyway though since Django provides this to any view as part of its internal function. Alright! But – how to reach that now with our browser? We need to define a so-called URLconf for this, which is what follows next. Creating a URLconf A URLconf or “routing configuration” is simply a list of URIs, which points towards a view. 
For example, we could create a URLconf which, for all requests to the URL /bot/hello, calls the previously created index view from the bot app. Even though this is not meaningful really, let’s do that to explain this step-by-step without expecting everything to be self-explanatory: Open the file dbot/urls.py in your favorite editor. You will notice, that after some comments, there is already one routing in place: urlpatterns = [ path(‘admin/’, admin.site.urls), ] Again: Ignore the imports for now. Just take the fact that it works like this for granted for now: To map the URL /bot/hello to our index – view in bot/views.py, we first need to step back and remember, what we are doing here: We are writing an app, which eventually can be taken and copied to other Django projects later. This app might have several URLs, pointing to different functions inside of it. Also, a Django project may have several apps installed. Does it really make sense that the potentially complicated or even conflicting URLconf is handled in one central file of the project? Would you really like to solve a naming conflict like one app demanding the URL /conf for itself internally when you had decided to use that very same URL to access your app for configuring your project? Most certainly not. That’s why it’s a common pattern to create sub-paths for each app and “delegating” the URLconf of that branch to the app. This way, you need to create just one single line for each app you are using, instead of dealing with dozens per app and conflicting patterns. To do that, we are changing the file dbot/urls.py in the following way: First, we add include to the list of imported elements from django.urls : Next, we register the path bot/ to be delegated to that app’s own URLconf by adding the following to the urlpatterns – list: This makes Django search for the file bot/urls.py for additional URLconf configurations for everything below the URI bot/ (like bot/hook or similar).
The file bot/urls.py is not created by executing python manage.py startapp bot ; we need to create that file ourselves. Let’s do so now with the following content: from . import views urlpatterns = [ path(‘hello/’, views.index), ] And – we are done setting up our demo-URLconf for now! To test this, run the HTTP server locally and access with your browser. This should display the text we entered to our index – view before: Stop: Hammertime !!! I have to admit that I underestimated the extent of this article a lot! That’s why I will make a stop here and postpone the rest of the pre-announced content like creating a database, showing additional Heroku – tools, etc. to the next part to not make this a too big and boring thing to read. Outlook for the next part of the series We just learned about some terminology, what an app is and how it is created and made available. In the next article of this series, we will utilize this freshly gained knowledge to create the hook for our bot, finally. Also, we will create the database. And, because of the lesson I just learned: That’s it – nothing more 😉 If you liked or disliked this article, I’d love to read that in the comments! Enjoy coding!?
https://smartsoftware247.com/create-your-own-telegram-bot-with-django-on-heroku-part-7-introducing-apps-and-urlconf/
CC-MAIN-2019-18
refinedweb
1,948
71.04
in reply to Seeing if two numbers have the same sign You are doing more work than you need to. Remember that mathematical comparisons return booleans, and booleans can be directly compared. if ($x<0 == $y<0) { print "$x and $y have the same sign"; } [download] (($x ^ $y) < 0) would probably also work assuming they are integers. I think the trick with abs or the comparison operator would probably actually be less efficient than doing a comparison. Using multiplication is going to be slow as well. BTW the sign of zero isnt a theological debate, it is an implementation detail of the machine you are working on and the data types you are using. :-) Neat (wish I'd thought of it) but it implicitly assumes that zero is a positive number, given 0 and -1 the test returns 0, given 0 and +1 it returns 1. If the test was ( $x<=0 == $y<=0 ) then zero is treated as both positive and negative and the two previous examples return 1. Which is consistent but might not be what you want. Using sub andNeg { return (($_[0]^$_[1]) < 0); } [download] print "<table border>\n<tr><td>", join("</td><td>", "x", "y", "andNeg( +)", "spaceship()", "mult()", "anchor()"), "</td></tr>\n"; for (0..3) { my $x = 1 - 2 * ($_ % 2); my $y = 1 - 2 * int($_ / 2); print "<tr><td>", join("</td><td>", $x, $y, andNeg($x, $y), spaceshi +p($x, $y), mult($x, $y), anchor($x, $y)), "</td></tr>\n"; } print "</table>\n"; [download] sub spaceship { (0 <=> $_[0]) == (0<=> $_[1]); } sub mult { $_[0]*$_[1] > 0; } sub anchor { if ($_[0] > 0) { if ($_[1] > 0) { # both pos return 1 } } elsif ($_[0] < 0) { if ($_[1] < 0) { # both neg return 1 } } return 0; } [download] full test program (don't use this use Benchmark; instead) Hmm. I didnt actually test it and had assumed that the result of binary xor was going to be signed, but of course its unsigned, hence the xor ideas doesnt work as written. My bad. Sorry. Id just use the $x<0 == $y<0 approach anyway. :-) Mathematically, the sign of zero is undefined. 
Just as in football, you can't say which side of center the center lines up on. A foolish day Just another day Internet cleaning day The real first day of Spring The real first day of Autumn Wait a second, ... is this poll a joke? Results (434 votes), past polls
http://www.perlmonks.org/index.pl?node_id=661703
CC-MAIN-2014-15
refinedweb
410
67.49
perlquestion hacker I've been working with [ C code] that crunches the [ US Census data] into CSV files, based on the specified proximity to the origin zipcode. The problem is that the C code is horribly slow, and I can't seem to figure out why. It takes my PIII/1.3Ghz/512mb RAM machine about 20 minutes to crunch the 987k input data file for zipcodes matching within a 0-25 radius of the given origin zipcode. That seems very slow. <p>The master 2000 Census data file contains records in this format:<code> ZIP_CODE LONGITUDE LATITUDE 00210 71.0132 43.00589 00211 71.0132 43.00589 00212 71.0132 43.00589 00213 71.0132 43.00589 00214 71.0132 43.00589 00215 71.0132 43.00589 ...</code> <p>My output file, separate for each type of range (0-25.txt for zipcodes within 0-25 miles of the origin, 0-50.txt for zipcodes within 0-50 miles of the origin, etc.), contains entries such as:<code> 00210,00210 00210,00211 00211,00210 00210,00212 00212,00210 00210,00213 ...</code> <p>For each given zipcode found in the master file (where <i>origin</i> == 00210 in this case, to start with), I want to output a file that contains all matching zipcodes within the specified proximity to that zipcode. So in the example above, all of the zipcodes within 0-25 miles of 00210 would be output to 0-25.txt, a csv file containing the data shown above. <p>I have the working radii functions which do this, and does work (but is very slow), and looks like: <code> #define EARTH_RADIUS 3956 static inline float deg_to_rad(float deg) { return (deg * M_PI / 180.0); } /* Function to calculate Great Circle distance between two points.
*/ static inline float great_circle_distance(float lat1, float long1, float lat2, float long2) { float delta_long, delta_lat, temp, distance; /* Find the deltas */ delta_lat = lat2 - lat1; delta_long = long2 - long1; /* Find the GC distance */ temp = pow(sin(delta_lat / 2.0), 2) + cos(lat1) * cos(lat2) * pow(sin(delta_long / 2.0), 2); distance = EARTH_RADIUS * 2 * atan2(sqrt(temp), sqrt(1 - temp)); return (distance); }</code> <p>In perl, this would be:<code> my $distance = sqrt(($x1-$x2)**2+($y1-$y2)**2);</code> <p>My goal is to convert this over to perl, both so I can gain the speed and efficiency of perl (as well as make this portable to Windows systems, where the current C code doesn't quite run yet), as well as expand my knowledge of perl in general. <p>Has anyone done this? Any pointers that might be useful here?
http://www.perlmonks.org/index.pl?displaytype=xml;node_id=246967
CC-MAIN-2017-17
refinedweb
433
73.58
Tell us what you think of the site. Which module contains the function performPolyDeleteElements? It doesn’t appear to be in the maya.cmds module. Also, could someone be kind enough to point me to the documentation for that function? The MEL and Python documentations included in Maya don’t seem to have any information regarding it. its actually a script that comes with maya (check your mayaInstallDir\scripts\others\performPolyDeleteElements.mel) only way i found it accessible (didnt spend a great deal of time tbh) and i dont advise you do this as ‘eval’ is slow and most of the time needless, but it does works, is; import maya.mel as mel mel.eval("performPolyDeleteElements") it is dirty but i couldnt find another way. the performPolyDeleteElements.mel script itself is fairly simple, just uses filterExpand to get selection and using if’s applys DeleteVertex and/or DeleteEdge scripts, which again have the same issue. Hope it helps though. Lee Dunham | Character TD ldunham.blogspot.com
http://area.autodesk.com/forum/autodesk-maya/python/which-module-contains-performpolydeleteelements/page-last/
crawl-003
refinedweb
166
58.69
Through this article, you will learn how to make HTTP GET requests from OpenWeatherMap API with ESP32/ESP8266 in acquiring useful data using MicroPython firmware. With the help of OpenWeatherMap API, we will make HTTP GET request to obtain current weather readings and display those readings on the MicroPython terminal. We have a similar guide for ESP32 and ESP8266 using MicroPython: HTTP GET using ESP32 and Arduino IDE (OpenWeatherMap.org and ThingSpeak) This article covers the followings: - Introduction to API and OpenWeatherMap - Setting up and accessing the OpenWeatherMap API - Introduction to JSON Script - A simple Micro-Python Script to decode JSON data from the API You should also know how to connected ESP32/ESP8266 boards with WiFi networking using network library of MicroPython. If you don’t know, you can read these getting started guides: OpenWeatherMap Introduction API Application Programming Interface or API in short is an interface through which different services communicate with one another without knowing the complex structures through which they are implemented. This is advantageous as it makes our whole process simpler, cost-effective and less time-consuming. In easy terms, APIs conveys a user’s response to a system and in return sends the system’s response back to the user. Although there are several different types of APIs, the one which we will be focusing on is called a web API. Its main aspect is to send requests from web applications and responses from servers through the Hypertext Transfer Protocol (HTTP). This is usually done in the JavaScript Object Notation (JSON) format. OpenWeatherMap API OpenWeatherMap is an online webpage that gives the user information about the weather parameters for any set location. These include temperature, precipitation, humidity, pressure, wind, and even forecasts to name a few. It has accurate and fast APIs which perform this functionality. 
In our project, we will be requesting the weather forecast for our set location through this API. Before we describe our project in detail, first we have to set up an account in the OpenWeatherMap. Using OpenWeatherMap API Go to the following webpage: https://openweathermap.org/api to open the API. You will see the following window when the web page loads. Click ‘here’ as shown in the red box to sign up. You will be shown a page where you will have to add your username, password and some other details to create your account. Complete all the details, tick the relevant fields and then click on the ‘Create Account’ button. The webpage will send a confirmation link to the email address you provided. After confirmation, the following appears: Now, go to the ‘API keys’ tab and the following window will appear, carrying your unique API key. The key highlighted in the red box is your unique key which we will be using later. Save this key and for security reasons do not share it with anyone. Obtaining the Weather Update Now as we have set up our account with OpenWeatherMap and have also obtained our key, let us look into how to get the weather update for a location of our choice. You will have to type the following URL in a new tab: http://api.openweathermap.org/data/2.5/weather?q=your_city,your_country_code&APPID=your_unique_API_key There are three parameters which we are specifying in the URL. First ‘q=your_city’ is the name of the city whose weather update you are acquiring. Second, ‘your_country_code’ specifies the country code of the city you specified before. Third, you will enter your unique API key which we just saved before. Let us take a simple example. For example, if we want to know the weather update of the city Seoul in South Korea, we will enter the following URL: http://api.openweathermap.org/data/2.5/weather?q=Seoul,KR&APPID=your_unique_API_key Copy the following URL in a new window and the following parameters pop up. This is the current weather update of Seoul, South Korea (same time report). As you see above, the information is not easy to decipher. In fact, it is in JSON script which we will now learn how to read.
MicroPython: Reading the JSON Script The syntax we obtained above was in JavaScript Object Notation or JSON in short. APIs usually communicate through this universally accepted script for ease. As you can see there are different types of symbols including {} [] “ ”, The following rules apply to JSON Script - Data is depicted through name/value pairs - After every name, we put a colon (:) - Commas (,) separate the names - Curly brackets {} are used for objects - Square brackets [] are used for arrays Now I will arrange the JSON Script which we obtained for Seoul in a readable manner according to the rules stated above. { "coord":{ "lon":126.9778, "lat":37.5683 }, "weather":[{ "id":800, "main":"Clear", "description":"clear sky", "icon":"01d" } ], "base":"stations", "main":{ "temp":291.52, "feels_like":290.59, "temp_min":290.15, "temp_max":292.15, "pressure":1021, "humidity":45}, "visibility":10000, "wind":{ "speed":6.17, "deg":110 }, "clouds":{ "all":0 }, "dt":1619314324, "sys":{ "type":1, "id":8105, "country":"KR", "sunrise":1619297049, "sunset":1619345742 }, "timezone":32400, "id":1835848, "name":"Seoul", "cod":200 } MicroPython Script: HTTP GET OpenWeatherMap.org. Create a new Micropython file in uPyCraft ide or thonny IDE and save it as boot.py. Copy the code given below.
# Visit Microcontrollerslab.com for complete project details import time try: import urequests as requests except: import requests try: import ujson as json except: import json import network import esp esp.osdebug(None) import gc gc.collect() ssid = 'Enter_your_WiFi_name' password = 'Enter_your_WiFi_password' city = 'your_city' country_code = 'your_country_code' open_weather_map_api_key = 'your_unique_API_key' station = network.WLAN(network.STA_IF) station.active(True) station.connect(ssid, password) while station.isconnected() == False: pass print('Connection successful') print(station.ifconfig()) #set your unique OpenWeatherMap.org URL open_weather_map_url = 'http://api.openweathermap.org/data/2.5/weather?q=' + city + ',' + country_code + '&APPID=' + open_weather_map_api_key weather_data = requests.get(open_weather_map_url) print(weather_data.json()) # Location (City and Country code) location = 'Location: ' + weather_data.json().get('name') + ' - ' + weather_data.json().get('sys').get('country') print(location) # Weather Description description = 'Description: ' + weather_data.json().get('weather')[0].get('main') print(description) # Temperature raw_temperature = weather_data.json().get('main').get('temp')-273.15 # Temperature in Celsius temperature = 'Temperature: ' + str(raw_temperature) + '*C' #uncomment for temperature in Fahrenheit #temperature = 'Temperature: ' + str(raw_temperature*(9/5.0)+32) + '*F' print(temperature) # Pressure pressure = 'Pressure: ' + str(weather_data.json().get('main').get('pressure')) + 'hPa' print(pressure) # Humidity humidity = 'Humidity: ' + str(weather_data.json().get('main').get('humidity')) + '%' print(humidity) # Wind wind = 'Wind: ' + str(weather_data.json().get('wind').get('speed')) + 'mps ' + str(weather_data.json().get('wind').get('deg')) + '*' print(wind) How the Code Works? Import Libraries We will start off by importing all the necessary libraries, modules, and classes. urequests and ujson libraries are used in making HTTP GET and POST requests to get and send required data to a web client or web server. The network library is necessary as we have to connect our ESP boards to the local network through the router.
import time try: import urequests as requests except: import requests try: import ujson as json except: import json import network import esp esp.osdebug(None) import gc gc.collect() Connecting to Network The next step is to enter all your credentials including the local network as well as the location parameters. Enter your Wi-Fi name and password through which you will be connecting your ESP board. Then enter the city name, its two-digit country code, and the unique API key which you would have already saved before. Note the API key is different for every user and should not be shared. Then connect to your local network through the following block of code. station = network.WLAN(network.STA_IF) station.active(True) station.connect(ssid, password) while station.isconnected() == False: pass print('Connection successful') print(station.ifconfig()) Make HTTP GET Request Next, comes the important part. We will now be creating an API request by forming a variable ‘open_weather_map_url’ which contains the URL which displays the weather updates. This URL will be fed with the location and key parameters which we specified above. You can change the credentials according to your need. open_weather_map_url = 'http://api.openweathermap.org/data/2.5/weather?q=' + city + ',' + country_code + '&APPID=' + open_weather_map_api_key This variable will be useful in making the API request. We will use requests.get(open_weather_map_url) to make the API request. This data will be saved in the variable named ‘weather_data.’ weather_data = requests.get(open_weather_map_url) We will be converting the data stored inside weather_data into JSON syntax by using weather_data.json(). The .json() method is being used here. Then we will print the readings on the Shell of our IDE. print(weather_data.json()) We can also use the HTTP get() method to obtain specific parts of the data.
For example, if you want to access the data about sunset which is present inside sys as seen below: "sys":{ "type":1, "id":8105, "country":"KR", "sunrise":1619297049, "sunset":1619345742 }, Then we will give the following command: weather_data.json().get('sys').get('sunset') Copy the above code to boot.py file and upload it to ESP32 or ESP8266. After that press the reset button of your device, you will see the temperature, humidity and pressure readings in your specified location as follows: Output in Thonny IDE: Output in uPyCraft IDE: In this article, we learned how to create an HTTP GET request from OpenWeatherMap API using ESP32/ESP8266 in Micropython. You may also like to check these Weather station projects with MicroPython: - MicroPython: BME280 Web Server with ESP32/ESP8266 (Weather Station) - MicroPython: DS18B20 Web Server with ESP32/ESP8266(Weather Station) - ESP32/ESP8266 MicroPython Web Server – Control Outputs - MicroPython: DHT11/DHT22 Web Server with ESP32/ESP8266 (Weather Station) - ESP32 HTTP POST using Arduino IDE (ThingSpeak and IFTTT)
https://microcontrollerslab.com/micropython-openweathermap-api-esp32-esp8266-sensorless-weather-station/
CC-MAIN-2022-33
refinedweb
1,564
55.34
A typical form of a Panda program might look like: from direct.showbase.DirectObject import DirectObject # To listen for Events class World(DirectObject): def __init__(self): #initialize instance self. variables here def method1(): # Panda source goes here w = World() run() # main loop run() is a function that never returns. It is the main loop. run() Alternatively, run() need not be called at all. Panda doesn't really need to own the main loop. Instead, taskMgr.step() can be called intermittently, which will run through one iteration of Panda's loop. In fact, run() is basically just an infinite loop that calls Task.step() repeatedly. taskMgr.step() Task.step() taskMgr.step() must be called quickly enough after the previous call to taskMgr.step(). This must be done quickly enough to keep up with the frame rate. This may be useful when an imported third-party Python module that has its own event loop wants to be in control of program flow. A third-party example may be Twisted, the event-driven networking framework. The solution to this problem is to let Panda3D's loop be controlled entirely by Twisted's event loop. You will need to use the LoopingCall method to add Panda's taskMgr.step() method to Twisted's event loop. Then, you need to call reactor.run() instead of Panda3D's run() method to run Twisted's event loop. Here's an example of how this will work: reactor.run() from twisted.internet.task import LoopingCall from twisted.internet import reactor LoopingCall(taskMgr.step).start(1 / Desired_FPS) reactor.run() You will need to replace Desired_FPS by the desired framerate, that is, how many times you want Panda3D to redraw the frame per second. Please note that reactor.run() is blocking, just like Panda's run() method. Another third-party example is the wxPython GUI toolkit, which is a blending of the wxWidgets C++ class library with the Python programming language.
Panda's run() function, and wx's app.MainLoop() method, both are designed to handle all events and never return. They are each supposed to serve as the one main loop of the application. Two main loops cannot effectively run an application. app.MainLoop() wxPython also supplies a method that can be called occasionally, instead of a function that never returns. In wx's case, it's app.Dispatch(). app.Dispatch() A choice can be made either to have wx handle the main loop and call taskMgr.step() intermittently, or to have Panda handle the main loop and call app.Dispatch() intermittently. The better performance choice is to have Panda handle the main loop. In the case that Panda handles the main loop, a task needs to be started to call app.Dispatch() every frame, if needed. Instead of calling wxPython's app.MainLoop(), do something like the following: app = wx.App(0) def handleWxEvents(task): while app.Pending(): app.Dispatch() return Task.cont taskMgr.add(handleWxEvents, 'handleWxEvents') run() # Panda handles the main loop In the case that wxPython handles the main loop using app.MainLoop(), to keep the framerate quick and reduce the CPU load, add sleep(0.001) in the body of the program. This will yield to Panda. After the sleep is over, control will return to wxPython. wxPython can then check for user events. wxPython's user-generated callback events are generally generated only at infrequent intervals (based on when the user is interacting with the window). This is appropriate for a 2-D application that is completely response-driven, but not very useful for a 3-D application that continues to be active even when a user is not interacting with it. sleep(0.001)
http://www.panda3d.org/manual/index.php/Main_Loop
CC-MAIN-2018-39
refinedweb
618
68.47
Intro: How to Make a Computer Game(in Unity) Video Coming soon Perhaps the biggest tribute that a gamer can make for his love of games is to make a game himself. Hi my name is Vazgi and in this instructable I am going to teach you how to make a computer game. Let`s have some fun making a game! Things you will need: - Decent pc or laptop. - Some free time. - The drive to make your first game. Disclaimer: I am in no way a professional game developer so please don`t criticize me for any mistakes that I may have made. I just love computer games!!! Step 1: The Engine Ok, so the first thing you need is an engine. An engine is the backbone of a game. Basicly the engine creates the game world. There are many popular engines. There is unity, unreal engine, cryengine and many more. There are many videos and documentation on each of them so just pick one. I personaly use unity for 2 major reasons. - first in unity porting a game for a different system is as easy as doing a couple clicks. - second the Unity asset store. Step 2: The Assets Assets are the second thing you will need to make a game.Basicly everything is an asset in a game. Scripts, sounds, animations, 3d objects, everythings is an asset. And unity provides its users with a large amount of content made by the team of unity or by other users. Some are paid some are not. I would suggest to stick to the free for your first game. If you have picked another engine a good idea is to learn to use a 3d modelling software such as blender ,maya or zbrush. Step 3: Lower Your Expectations Concerning your first game I have to disappoint you that your game would be nothing like Skyrim, Bioshock or The Witcher or any well-known video game.A good idea is to start small so that you don’t just quit in the beginning. Try making a really simple game like Mario or space invaders. Make it small and make it work. Then slowly go to more complicated projects. Step 4: Share With Your Friends The last part is to share it with your friends. 
Ok, so what's the point of a game if you are the only one that plays it? You probably have dozens or even hundreds of friends on social media. Facebook, Twitter, YouTube — share it everywhere. But don't get discouraged if there are some bad remarks on your game. Use it to better your game. In the end they will be able to give you feedback which you can use to make your game better. And as time goes by you will get better and better. Step 5: The Sample Game From the Video As I said before your first game must be simple not something too complicated. In the first picture you see the game from the step by step video. And on the second almost the same game just with the 3d model platforms not just cubes (such a simple 3d model can be created in less than an hour even if the person had no previous knowledge of 3d modeling software). So just make something fun for you then look to please the audience. Step 6: Scripting In Unity you can write code in JavaScript or in C#. But an interesting thing is that you can use both of them in the same project. So if you have a friend who only knows JS and you only know C# you can still make a game together.
Here are the scripts I used in the game: WIN.js #pragma strict private var drawGUI = false; private var doorIsClosed = true; function OnTriggerEnter(theCollider : Collider){ if (theCollider.tag == "Player") { drawGUI = true; } } function OnTriggerExit(theCollider : Collider){ if (theCollider.tag == "Player") { drawGUI = false; } } function OnGUI () { if (drawGUI == true){ GUI.skin.label.fontSize = GUI.skin.box.fontSize = GUI.skin.button.fontSize = 23; GUI.Box(Rect (Screen.width*0.5-51,200,220,38), "You Won The Game"); } } FallingScript.cs using UnityEngine; using System.Collections; public class FallingScript : MonoBehaviour { public GameObject Cube; void OnTriggerEnter (Collider col) { if (col.gameObject.tag == "Floor") { this.transform.position = Cube.transform.position; } } } Step 7: The End If you have liked the instructable please consider voting fot me in the contest. I hope you make an awsome game. If you like please share it (your game) with us in the comment section below. Disclaimer: This is my second instructable so I may have made some mistake feel free to point them out to me so I can better myself. Discussions
https://www.instructables.com/id/How-to-Make-a-Computer-Gamein-Unity/
CC-MAIN-2018-39
refinedweb
783
83.66
Requirements - Xcode 10.0+ - Swift 4.2+ - Alamofire 4.7.3+ Legacy Swift Support If you need to support an earlier version of Swift, please either download the zip or point your Podfile at the coresponding tag: - Swift 4.0: tag "Swift 4.0 Final" - Swift 3.x: tag "2.0.1" - Swift 2.2: tag "Swift 2.2 Final" - Swift 2.3: tag "Swift 2.3 Final" The respective readme’s in those tags have more explicit instructions for using tags in CocoaPods. Installation Cocoapods AlamofireRSSParser is available through CocoaPods. To install it, simply add the following line to your Podfile: pod "AlamofireRSSParser" Then import AlamofireRSSParser wherever you’re using it. Manually Alternately you can add the contents of AlamofireRSSParser/Pod/Classes/ to your project and import the classes as appropriate. Usage Note: To run the example project, clone the repo, and run pod install from the Example directory first. You use AlamofireRSSParser just like any other response handler in Alamofire: let url = "" Alamofire.request(url).responseRSS() { (response) -> Void in if let feed: RSSFeed = response.result.value { //do something with your new RSSFeed object! for item in feed.items { print(item) } } } AlamofireRSSParser returns an RSSFeed object that contains an array of RSSItem objects. What It Does and Doesn’t Do I think we can all admit that RSS implementations are a bit all over the place. This project is meant to parse all of the common, high level bits of the RSS 2.0 spec that people actually use/care about. It is not meant to comprehensively parse all RSS. RSS 2.0 spec elements that it currently parses: - title - link - itemDescription - guid - author - source - pubDate - enclosure - category In addition, since this is a Swift port of what was originally the backbone of Heavy Headlines it also parses portions of the Media RSS Specification 1.5.1. 
Current elements: - media:content - media:thumbnail - content:encoded It also yanks all of the images that may be linked in the itemDescription (if it’s HTML) and creates a nice array named imagesFromDescription that you can use for more image content. If you need more elements parsed please file an issue or even better, please contribute! That’s why this is on GitHub. Author Don Angelillo, [email protected] Inspired by Thibaut LE LEVIER’s awesome original Block RSSParser AFNetworking Plugin. License AlamofireRSSParser is available under the MIT license. See the LICENSE file for more info. Latest podspec { "name": "AlamofireRSSParser", "version": "2.2.0", "summary": "An RSS parser response handler for Alamofire", "description": "An RSS parser plugin for Alamofire. Adds a "responseRSS()" responseHandler to Alamofire.", "homepage": "", "license": "MIT", "authors": { "Don Angelillo": "[email protected]" }, "source": { "git": "", "tag": "2.2.0" }, "platforms": { "ios": "8.0" }, "requires_arc": true, "swift_version": "4.2", "source_files": "Pod/Classes/**/*", "dependencies": { "Alamofire": [] } } Fri, 26 Oct 2018 20:40:19 +0000
https://tryexcept.com/articles/cocoapod/alamofirerssparser
CC-MAIN-2020-24
refinedweb
468
60.41
You can subscribe to this list here. Showing 25 50 100 250 results of 32 Does anyone else think that the excessive amount of mail sent to the arianne devel list might be more efficient if it were on a web forum, or in an IRC channel, where things could be more quickly replied to, and would be more logically laid out? -- Benjamin, alias CAIMLAS caimlas@... 1 Corinthians 10:13 Thursday 08 February 2001 16:20 AD, MIGUEL ANGEL BLANCH LARDIN wrote < > The compiler IMPLICITLY generates those if you don=B4t and it won=B4t < generate < > any special commands to copy dynamically allocated stuff, it will < simply < > copy < > the pointer. < > If either of the copies is then destroyed and deletes the pointer, < > the other remains with a dangling pointer. < < Yes, note: The FIRST thing I do when starting a data-class (like eg AVector) i= s=20 that I declare constructor, copy-constructor, assignment-operator and=20 destructor. < This is one of the things that is delaying me in using STL, and of < course, the containers of base class that destroy the subclass. EX: < < class A < { < } < < class B < { < } What is the relation between the above two classes ? I assume you mean=20 class B: public A . < list<A> l; < l.push_back(B()); You can=B4t store different types in an STL-container, not even when they= are=20 related. You have to use pointers then. Or better, some kind of proxy tha= t=20 automagically deletes the object pointed to. < B t=3Dl.pop_back; //<-- Error pop_back returns void, there=B4s an explanation for this in Stroustroup=B4= s book. instead use: l.push_back(B()); B t=3Dl.back(); l.pop_back; < So I have to use pointers, so why I need STL list, as I have the same < kind of problem that with AList. Funny thing about STL is that all these classes have a common interface. = That=20 makes things much easier sometimes. 
Especially concerning that STL is not= =20 only container-templates but also algorithms.=20 Things like=20 copy(cont.begin(), cont.end(), ostream_iterator<type>(cout)); are imho much easier to do. And the cool thing is that it doesn=B4t matte= r what=20 type (vector, list, map ) the container is ! < Perhaps using autoptr, I have to read it again. No. auto_ptr is impossible to use with STL-containers. Boost=B4s shared_p= tr=20 might be what you want. Thursday 08 February 2001 18:11 AD, MIGUEL ANGEL BLANCH LARDIN wrote < I have tried namespace on the NGarianne, but it didn't work. < What is the problem with polluting the global name space? This pollution makes it harder to find a name for a function. If all the=20 SDL-functions didn=B4t begin with SDL or SDLNet or something like that, t= here=20 would be collisions between names.=20 You can always prefix a set of related functions with some identifier but= =20 these names tend to get lengthy to write and read. With namespaces you ca= n=20 explicitly choose the function via 'MyNamespace::SeldomUsedFunction()' or= if=20 you use functions of that namespace all the time, you can include the=20 namespace via 'using MyNamespace;'. This tends not to be of great importance for us (application-developer) b= ut=20 rather for library-developer. Imagine a library that provides a class cal= led=20 Port or Socket. Chances are that this name is already used by some other=20 library. Therefore SDL++ has 'namespace SDL{class Port; class Socket; }'. uli Thursday 08 February 2001 18:15 AD, MIGUEL ANGEL BLANCH LARDIN wrote < > < > -. < If you move in one direction it could mean that you go up the stairs. Then not only your xy changes but z too. The intent of all the stuff I posted was to propose some test-cases: if you can easily and elegantly program these cases, your design was well= =20 done, if you have to do some awkward things that will easily break if som= eone=20 tries to change it, you failed. 
The main problem in programming the RP-stuff is when more than one objec= t is=20 involved in an action. For simple actions, you can always define an activ= e=20 and one or more passive objects. The action could then go into one of the= =20 active object=B4s member-functions. uli Thursday 08 February 2001 18:30 AD, MIGUEL ANGEL BLANCH LARDIN wrote < > btw: There should be a better way for others than MacOS, < > too, if that=B4s what it is. < > pseudocode: < > LoadWorld() < > while(! end) < > Run() < > SaveWorld() < > exit() < < Yes, but what determine end. < The only elegant way of doing is by remote administration. Pressing a < key to kill the server simply sucks. Well, that=B4s what I mean: you _have_ to kill it ! There=B4s no other=20 way to shut it down. At least on linux, you can install a signal-handler that catches=20 Control-C. From there on, you could do a shutdown.=20 I once tested that, I=B4ll see if I find a graceful way to incorporate it= =20 into the server. (got other probs currently...) uli Thursday 08 February 2001 18:18 AD, MIGUEL ANGEL BLANCH LARDIN wrote < BTW Why list don't have the [] operator defined? So How I see a < element given the index? < You just don=B4t. A doubly linked list is extremely slow if you want to a= ccess=20 an element at a given index. It is also slow to compute the size because = it=20 might have to count all the elements ! If you=B4re thinking of something like for(int i=3D0; i<list.size(); i++) list[i]->Something(); you=B4re on the wrong way. That would be against the spirit of STL. To go= with=20 STL, use this for(list<type>::iterator it=3Dlist.begin(); it!=3Dlist.end() ++it) (*it)->Something(); or even for_each(list.begin(), list.end(), mem_fun(&type::Something)); uli >> hmm. but we shouldn't worry too much for; >> A) >> This community is growing;) > >This is good and bad. Do you kno about the Damocles's sword? What do u mean? I get the expression but I don't know what u would like to say. . 
Well, some people live in Europe, others in Japan or Australia, there is USA too so try to find an hour that everyone can take :-) Anyway, there is an IRC available (P.S: Is it written on the webpage for people who'd like to speak with us directly?) it is irc.openprojects.net and the channel is #arianne. Some of the list are trying to be connected in this room whenever they're connected. -- ===================== MILLA olivier otherwise@... dwingeloo@... ICQ: 101386336 BeOS developer #2246 ===================== >... Yes, we will add as much races as possible. When I say all the creatures, I mean all: Dragons, Trolls, ... > >??" So you also have seen my Baldur's Gate character? X-D > >. Could be, but really isn't important at this stage. <IMPORTANT> > >. This is an important topic. We have to get an agreement here. </IMPORTANT> > >@... If I found it I will republish it to here. I simply have too many mail and a bad memory. :-) > > Karma & essense > > I think that the actual way is OK, although it is very complex ( the > > idea ) and isn't explained to its full. > > Maybe it's something that could be push a little furter.. > Not much game have included those kind of thing. Well, none have tried to do before such a complex task. :-) Really if arianne ever get to run, it will have a world more,more complex than any other game/app know by now. > Magic should be available to everyone, difference is that warrior are > weak > at using it, and don't have acces to lots of spells, compared to a > wizard. Just on Arianne there is no classes, it is skill oriented, so you can change your sword for your casting book and voila: a soccerer. > > Charisma is skipped because of it should be applied to PC as it is > > applied to NPC. So I think that we shouldn't have it. > > I don't understand what u mean?? Imagine that you have charisma high, so you talk to NPC and they join to your group, ok? 
Now if a NPC with a higher charisma tell you to join it, you MUST join as charisma rules make you to do it. So I prefer no charisma at all. > > ?? I mean that my idea is to create an alife world. So "monsters" would reproduce, die of hungry and such things. No magical reappearment. Also I expect the NPC will be able to open doors to attack other NPC/PC. :-) > > Adversities > > Light. > > I think that this is related to a Vampire like character, if it > > recieves light it dies. > > That could be fun, but that means the world is real time clock > real time day and night ?? We haven't decided yet. I think that Brian said something about days of 27.5 hours. > > "...) No problem, everytime is someone first time. :-) > Tell me if Im wrong but > You always discust about > all the aspect of the game > here on this new group ? Yes, we do. > Here is a suggestion, > Why don't you organise > net meeting ( Im not talking > about M$ software ) IRC? Well, we try but we have the timezone problem, we are all across the world, so find a time to meet all is hard, really hard. But I agree that is a good way, I often are only at irc.openprojects.net #arianne. >. Timezone problems. >. 6. New questions, problems appear and are solve faster. > What do you think of this ?? That is the correct way, but at 21:00h on Spain, Brian can be working, Uli,Olivier,Djagg,... and I can be taking dinner, and Minami can be just getting up for work. So it is really hard to so such a meeting and make it to be sucessfull. Anyway, when I get Internet at home, I will have no problem with the time of the chat. Mensaje citado por: Ulrich Eckhardt <Doomster@...>: > Hi folks! > I have made a small structural change concerning the > common/Message-directory. > All of the message-types defined therein had a common interface,=20 therefore > I > decided to put that interface in a base-class (MESSAGE.h,=20 MESSAGE.cpp). > The transition is not yet complete but some classes are already=20 converted. 
> > During one of these changes, I must have made a mistake: it all=20 compiles, > but > client and server occasionally segfault. I believe that the client=20 expects > a > message in one format, but the server sends it a different way ... > I haven=B4t yet gotten into the usage of these messages but if eg > S2C_MapPiece > needs _exactly_ the same binary message layout as C2S_MapPiece this=20 could > be > the place where I messed things up. > > > I=B4m comitting these changes anyway since I hope to resolve this=20 error > quicker > with your help. Err! Arianne code is in a dead line, just for development better to go for=20 NGarianne. I think that the same is done on NGArianne, but I am far from a=20 correct inheritance, just only a abstract base class. Anyway I will checkout code and try to find it. Ok, I have a book about AI, I skipped frames piece. But it could be interesting, would you like to do the research about it? That is how data structures are, known problems with frames, and so... > I think we should take a little more time for NGarianne because; > 1) > It's done solely by Miguel, in a very short time. It is very simple anyway. Just I follow the UML design with the ideas from Brian. > 3) > It's not working on Mac ( reason is mutex and thread stuff ). > 3-a) > Mac support needs time for I need to study loads of things..! I have a definitive idea. Client will be threadless. Performance on client will be limited by Renderer, so we can use poll on client, it would easy on Mac. Server will use threads. So we can code a single thread Mac server or simply ignore Mac server. Coding a single thread server would mean to recode layers 0,1,2,3 at least, but isn't a too work. > 4) > BeOS seems to have problem building Arianne code, too. > ( BeOS people---it's gonna work on MacOS --- why not on Be ?;)) I really don't know what is actually the status of the BeOS port. It works on my computer, and I have only get errors from Milla. 
I know that it has been downloaded, but there are no complaints. > hmm. but we shouldn't worry too much for; > A) > This community is growing;) This is good and bad. Do you kno about the Damocles's sword? > B) > Lots of ideas, also growing (well, yes it's quite a pressure for > coders..;)) No really, the most idea now the less surprise changes later. > So---please give me some time to summarize Uli's postings. Really, Minami, take this as a hobby. Do what you can when you can. Take as much time as you need. Notes: all word between "" are word im not sure of, I maybe good in english, but Im still far from beeing bilangual. MIGUEL ANGEL BLANCH LARDIN wrote: > > Hi, > > At last, I have been able to read the comments to the RP system. > Here are my ideas/comments. > > In Arianne, you can play ALL the creatures. Only for the sake of > gameplay we should limit the type of the initial creature to human, so > when you reach a high level on human and you die, you can choose to > reincarnate in a better type of creature. Anyway this will require a > good karma/essense.. > About Gender, I think that hermaphraditic is a type of Gender. > > About size, I think that it shouldn't be constant, but being in a > range, so you can be 170 cm tall when speaking about mean, but you can > be smaller or taller. > >. > Intellect. > Well, Arianne isn't combat oriented. We don't want to do a clone of > Diablo, we ( at least I ) are on the way of creating communities, and > encourage diplomacy wars, economic wars, or ecological wars,... Yes less fight and more mind would be cool. >@... > Stats > Again two options: > 1) Constant stats and modification on secondary skills. > 2) Modification on all the stats. > > The type 1 encourage the choice of a good character at something, > example, strengh, and use only that ability. The second could make the > players more equal. Anyway there could be hidden differences between > both. 
The two of these are good, But it's more fun for player if they can boost they're stat along the game. > Willpower==stamina > This is just a matter of name, I think that for me is clearer the > Willpower one, but perhaps isn't for an english speaker. I though stamina was some kind of body resistance for hard work or something like that.. Was I wrong all those years??? > Karma & essense > I think that the actual way is OK, although it is very complex ( the > idea ) and isn't explained to its full. Maybe it's something that could be push a little furter.. Not much game have included those kind of thing. > Character Innate Physical Abilities > This should be stats of the player, no matter how you look the > problem, a dragon and a whale has very different physical abilites. > > To Magic or not to Magic > Well, again two options: > 1) Magic only for those who want it > 2) Magic for all. > > IMO magic should be avaliable to all the characters, if you want, use > it or if not, just skip it. I ( and most of us I think ) don't like > the class system of AD&D. Just because I am a warrior, this don't mean > that I shouldn't be able to use magic. > > Magic exist on Arianne world, if you skip it, you skip an important > part of the game. Magic should be available to everyone, difference is that warrior are weak at using it, and don't have acces to lots of spells, compared to a wizard. > Characters Social Abilities > Well, Language are related to abilities of a character to speak to > characters of other races/regions. Tout a fait d'accord ( I agree ) > Charisma is skipped because of it should be applied to PC as it is > applied to NPC. So I think that we shouldn't have it. I don't understand what u mean?? > Thieving abilities. > Well, it is annoying to have a Warrior of level 100 trying to destroy > you with his Double hand sword + 5. > The ability will be included. > > Exploring > I think that these abilities could be interesting. Yes I though of that someday. 
Since the world is going to be big... (really really big) maybe every caracter could have a world map it draw to himself and can share it with other player, this could be interesting selling article. Hey who want this part of the world map.. it's for you at the low price of your soul... mouhahahahaha > "these should be independent of battle abilities. people want to be > able to go kill stuff then go to town and chat with friends and do > some crafts, make something interesting" > ?? > Adversities > Light. > I think that this is related to a Vampire like character, if it > recieves light it dies. That could be fun, but that means the world is real time clock real time day and night ?? > Material > The adversities would be related to the character, not to the race. > Just image that you are a wolf-human and you dress with a silver > armor. Wooo that's got to hurt!!! > > "...) > Well, creative skills, will be refine just to add the creation of new > items. > > Alignments > "agreed on all except this one- reputation per race should start > different, but theres no reason a dwarf that spends years helping > elves can't have a good reputation with them later". > But at first he would have a bad reputation, don't you think so? > > Motion/Encumberance thing. > I get this one explained from Brian, Motion relates to how hard to to > move the object, while emcumberance is related with the dificulty of > lift the object. > > Speed > I think that this is related to something like a soccer ball, once you > kick it it moves. I think that this attrib is useless. > > Transparency > I agree with you. > > On Arianne all the Objects are unique. > > Well, that's all. > The RP Doc is very old now, and it should be update and expanded, > Brian is on this. > If anyone would like to help Brian with this, just contact him. Well Brian I would help, but I don't have much time these day, but if you really need help, just tell me, I'd be happy to help with this. 
> > _______________________________________________ > Arianne-general mailing list > Arianne-general@... > -- =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Patrick Ouellet - Programmeur Sénior patrick.ouellet@... Recherche & Devloppement Les Entreprise Microtec inc. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= One Future, two choices: Oppose them or let them destroy us!! Word from a Linux user, speaking of Micro$oft =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Mensaje citado por: Ulrich Eckhardt <Doomster@...>: > error message: > X Error of failed request: BadValue (integer parameter > out of range for operation) > Major opcode of failed request: 129 (MIT-SHM) > Minor opcode of failed request: 3 (X_ShmPutImage) > Value in failed request: 0x282 > Serial number of failed request: 672 > Current serial number in output stream: 673 > > > It happens if I grab the funny circle with the flame > (what is it for, btw) and shove it out of the window. > To make it a bit more exact: when the _flame_ crosses > the lower or right border, this error is triggered. It looks like an SDL error. But I would like to take it a look too. Anyway the flame is there to test the animated sprites of the GUI, isn't it? Mensaje citado por: Ulrich Eckhardt <Doomster@...>: > #ifdef macintosh > gDone =3D false; > EventRecord theEvent; > while( ! gDone ) > #else > while(1) > #endif > { > // server loop > } >=20 > Is the reason for this that you can=B4t kill the server with eg.=20 Control-C ? On Linux, you can. I really not sure if it is possible (or desired ) to read from std=20 input from a thread. In the future the only way to kill the server=20 will be with an ADMIN to SERVER message. ( Yes, remote administration=20 ), for security reasons you are going to be able to lock the valid IP=20 for remote administrations. =20 > Could you explain what the YieldToAnyThread-call does, please? 
As Mac don't have a cooperative scheduler, you have to do the=20 scheduling, this is a big problem when using semaphores or blocking=20 Queues, as NGarianne uses. > btw: There should be a better way for others than MacOS,=20 > too, if that=B4s what it is.=20 > pseudocode: > LoadWorld() > while(! end) > Run() > SaveWorld() > exit() Yes, but what determine end. The only elegant way of doing is by remote administration. Pressing a=20 key to kill the server simply sucks. Mensaje citado por: Patrick Ouellet <patrick.ouellet@...>: > > Where can I find all the docs you wrot e for this > project, I would like to get involved. Design Docs on download section is a good start point. Minami's web page is also a good source. ASAP I will join all the documents posted to the list and add it to Design Doc. > < Sure!, this is because I am writting as much doc as I can. > Where is it ? It surely is not in cvs. By the way, for all docs > it could be useful to always add a date of the last overhaul. No, no CVS. Most of them are posted to the List. But also DesignDoc is a good read. I wanted to do it with Javascript, so that a document of more that a month is showed as outdate. > common/network.h is unused > > Scheduler is a no-op > > WorldManager, AIManagers, AISimpleManager are no-ops Yes. > > What does TDA mean (TDA.h) Abstract Data Type, like a list or a queue. What are the initials on english? ADT? > TimedWait needs a usage-example in the header Miguel's workload.push_back(TimedWait example). BTW Why list don't have the [] operator defined? So How I see a element given the index? > < > -. > Bad example: > class SysInfo > { > public: > static void GetSystemInfo(Message_C2S_SYSTEM_INFO *mes); > }; > > > You can=B4t make any instances (no public ctors, no friends, no static > creator-functions). I now made this a simple function. 
Sure, I think that my original idea was that SysInfo Host the Stats of=20 the system, I suppose that this class is also part of the crap that is=20 on the old code. > One reason to use something like this is eg Screen: there can only=20 be one > single instance of it. Making sure that it really is only one can be=20 done > via > static data. In this case, the class doesn=B4t serve as a plan that=20 describes > > how to make objects but is an object itself=20 >(I heard smalltalk puts much > more > emphasis on the class being an object). Yes, It is so that even class are object itself!. The problem is that I have learnt OOP with Smalltalk, so most of the=20 concept are very easy on Smalltalk but a pain when you move to C++,=20 Smalltalk manages the inherit automatically, you can chain the method=20 call with the super keyword, now I have found how to do this with C++,=20 but isn't so easy. > Another reason is that one doesn=B4t want to pollute the global=20 > namespace, > but > then one could easily make this pseudo-class a namespace. I have tried namespace on the NGarianne, but it didn't work. What is the problem with polluting the global name space?. And then use the news group to post the resume. Here's an exemple: ----------------------------------------------- Meeting on IRC on Febuary 08 2001 at 21h00 To be diffused on irc.openprojects.net on channel #arianne Topic 1. RP System 2. Character Innate Physical Abilities: 3. ...I don't have any idea... Moderated by : Someone ( let's say, I pick one up : Brian Thompson ) Everyone's invited to share there idea. ------------------------------------------------ What do you think of this ?? -- =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Patrick Ouellet - Programmeur Sénior patrick.ouellet@... Recherche & Devloppement Les Entreprise Microtec inc. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= One Future, two choices: Oppose them or let them destroy us!! 
Word from a Linux user, speaking of Micro$oft =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= > The compiler IMPLICITLY generates those if you don=B4t and it won=B4t=20 generate=20 > any special commands to copy dynamically allocated stuff, it will=20 simply > copy=20 > the pointer. > If either of the copies is then destroyed and deletes the pointer,=20 > the other remains with a dangling pointer. Yes,=20 This is one of the things that is delaying me in using STL, and of=20 course, the containers of base class that destroy the subclass. EX: class A { } class B { } list<A> l; l.push_back(B()); B t=3Dl.pop_back; //<-- Error So I have to use pointers, so why I need STL list, as I have the same=20 kind of problem that with AList. Perhaps using autoptr, I have to read it again. > The reason is that I=B4m feeling sabotaged by some who go ahead and=20 leave > rubbish in their paths. > Example: I started looking at SimpleAIManager and found some minor=20 glitches > > like passing an AString by 'AString&' and by value instead of=20 'AString > const&' or allocating objects on the heap without proper safeguards=20 against > > memory leakage. > > So far nothing serious but then I found that it in fact does=20 NOTHING. > It a non-functional class without raison d=B4etre or even a comment=20 for what > it > will be used. > The I dug deeper and found that its base-classes also offer not > functionality at all. I didn=B4t check whether these classes were used=20 at all > > but I pretty sue I could eliminate every use within an hour without > reducing > the effectiveness of the program itself. Yep, I started to code it to do the AI of the game, when I realized=20 that to code the AI we need the RP running, so I stopped it, but it=20 could be useful when we retake it. 
You don't need to clean it, there are just a skeleton for future=20 development.=20 I understand that I should have explained all of this on the code, but=20 when I code I do a maratonian sesion code, that is, I start at 9AM and=20 finish when I am almost asleep. I should comment more the code, but=20 really I think that there is code that explain itself. > Hell, I=B4d like to do some development&debugging but I always find=20 myself > cleaning up stuff and trying to read someone elses mind. Yes, I agree. > Every additional file makes the project more complex and here I find=20 six > absolutely useless files. > > > The true reason for the Arianne-NG branch is that you=B4ve been going=20 too > fast > and forgotten to clean up behind you. Maybe, also because we have a new design that is better than the=20 actual one, and I have though that the new directory structure would=20 be more maintenable that the actual one. > > I simply have the concept so clear on my head that I have no > > need for a 'proof of concept'. > q.e.d.: Proof of concept failed > You=B4re wrong and (unintentionally) blocking a _team_ effort. Yes, I see. I am really sorry. I will try to write more docs. But I would requiere more collaboration, for example, I proposed a=20 roadmap for RP, and I get no reply, so for the sake of the project I=20 have to decide that all agree with it. Instead of coding on the new branch I will stop myself, for writting=20 the docs that are needed for the development. Soon I will get IRC, I expect that this help to resolve that kind of=20 problems. Mensaje citado por: Alberto ngc <bartyvk@...>: > Hi Arianne_General; It's my first message to the list, and i am very happy > to talk with all of you. > > My problem is that i know a bit of C & C++ programming, but i dont know > where to start when i take the code and start reading. And i am worried > with > this because i want to help and take part of the project of arianne. 
> > My petition; can somebody let me url's of c programming tutorials (better > if > they are spanish) to learn, at least, how works a client-server program? > and, if somebody can guide me through the arianne code? > > Thanks to all Bienvenido, For code, the best is that you take a look at the design Docs, the important of code is that you get the idea not how we do it, when you understand the idea, just browse the code, it is very simple, as I don't use all the features of C++ ( Yes, I know, I will change this :-) ). Anyway, I will look for a great C++ tutorial on English :(, and I will give you the url. Regards, Miguel
http://sourceforge.net/p/arianne/mailman/arianne-general/?viewmonth=200102&viewday=8
CC-MAIN-2016-07
refinedweb
4,923
73.78
Copyright ©2001 W3C® (MIT, INRIA, Keio), All Rights Reserved. W3C liability, trademark, document use and software licensing rules apply. This document is the first published Working Draft of XSLT 2.0. It is published in order to provide the XSLT user community with a preview of the revised language specification, and to obtain feedback. It is based on the XSLT 1.0 Recommendation [XSLT 1.0] published on 16 November 1999. The changes made in this document are intended to meet the requirements for XSLT 1.1 and XSLT 2.0 described in [XSLT 1.1 Requirements] and [XSLT 2.0 Requirements] and to incorporate fixes for errors that have been detected in XSLT 1.0. A summary of the changes since XSLT 1.0 is included in [J Changes from XSLT 1.0]. XSLT 2.0 is designed to be used together with XPath 2.0, which has been developed by the W3C XSL Working Group in collaboration with the XML Query Working Group. The current specification of XPath 2.0 can be found in [XPath 2.0]. NOTE: This specification supersedes XSLT 1.1 (see [XSLT 1.1 WD]), which was never developed beyond the Working Draft stage. Comments on this specification may be sent to xsl-editors@w3.org; archives of the comments are available, and it is possible to subscribe to the list. Public discussion of XSL, including XSL Transformations, takes place on the XSL-List mailing list. The English version of this specification is the only normative version. However, translations of this document may be available. A list of current W3C Recommendations and other technical documents can be found on the W3C technical reports page. This specification has been produced as part of the W3C Style activity. This specification defines the syntax and semantics of the XSLT language. A transformation in the XSLT language is expressed in the form of a stylesheet, whose syntax is well-formed XML [XML] conforming to the Namespaces in XML Recommendation [XML Names]. A stylesheet generally includes both elements that are defined by XSLT and elements that are not defined by XSLT. NOTE: The term "stylesheet" reflects the fact that one of the important roles of XSLT is to add styling information to an XML source document; XSLT is, however, also used for a wide range of other transformation tasks.
The software responsible for transforming a source document into a result document is referred to as the processor. This is sometimes expanded to XSLT processor to avoid any confusion with other processors, for example an XML processor. A specific product that performs the functions of an XSLT processor is referred to as an implementation. A transformation expressed in XSLT describes rules for transforming a source tree into a result tree. The transformation is achieved by a set of template rules. A template rule associates a pattern, which matches nodes in the source document, with a content constructor, which can be evaluated to produce part of the result tree. The structure of the result tree can be completely different from the structure of the source tree. In constructing the result tree, nodes from the source tree can be filtered and reordered, and arbitrary structure can be added. This mechanism allows a stylesheet to be applicable to a wide class of documents that have similar source tree structures. NOTE: More generally, a transformation can process several source trees and produce several result trees. A stylesheet may consist of several stylesheet modules, contained in different XML documents. One of these functions as the principal stylesheet module. The complete stylesheet is assembled by finding the stylesheet modules referenced directly or indirectly from the principal stylesheet module using xsl:include and xsl:import elements: see [2.8.1 Stylesheet Inclusion] and [2.8.2 Stylesheet Import]. This document does not specify how a transformation is initiated. The transformation process takes as its main input a source tree referred to as the principal source document. The structure of this tree is described in [Data Model], augmented by additional specifications in this document (see [3 Data Model]). Issue (document-collection): There are suggestions that it should be possible to supply a collection of source documents as input. 
In this case, it is unclear whether any one of these would be specially identified as the principal source document, or whether the transformation would be applied to each of them independently. In addition the transformation requires identification of the principal stylesheet module, and optionally, values for one or more stylesheet parameters (see [6.2 Global Variables and Parameters]). A stylesheet can process further source documents in addition to the principal source document. These additional documents can be loaded using the document function (see [14.1 Multiple Source Documents]), or they can be supplied as stylesheet parameters (see [6.2 Global Variables and Parameters]), or as the result of an extension function (see [16.1 Extension Functions] NOTE: Sometimes it is useful to write an XSLT stylesheet that does not require input from a principal source document. However, the semantics of the language require that a principal source document is always present. Implementors may provide a mechanism that supplies a default document, containing just a document node with no children, as the principal source document to be used in the absence of any other source document. A stylesheet contains a set of template rules. A template rule has two parts: a pattern which is matched against nodes in the source tree and a content constructor which is evaluated to produce a sequence of nodes: these nodes are typically used to form part of the result tree. This allows a stylesheet to be applicable to a wide class of documents that have similar source tree structures. A content constructor is evaluated for a particular node in the source tree, to create part of the result tree. A content constructor can contain elements (called literal result elements) and text nodes that specify part of the result structure directly. A content constructor can also contain elements from the XSLT namespace that are instructions for creating parts of the result tree. 
When a content constructor is evaluated, each instruction is evaluated to produce a sequence of zero or more nodes; the result of the content constructor as a whole is a sequence of nodes formed by concatenating the results of each of the instructions and literal results nodes that it contains, in the order that they appear in the content constructor. The resulting nodes are typically attached as children to an element or document node constructed by the instruction that contains the content constructor, thus forming a tree. During this process, adjacent text nodes will be merged into a single text node. When a content constructor is evaluated to create new nodes, the tree to which these nodes are added is referred to as the current result tree. When the transformation is initiated, a result tree is created, and becomes the current result tree. This tree is referred to as the principal result tree. Various XSLT instructions, (including xsl:variable and xsl:result-document) establish a new current result tree for the nodes created by the content constructor that they contain. The elements occurring within a content constructor are classified as being either literal result elements or instructions. If the element is in the XSLT namespace, or in a namespace designated as an extension namespace, then it is an instruction. Otherwise, it is a literal result element. The element syntax summary notation used to describe the syntax of XSLT-defined elements is described in [20 Notation], and a full list of these elements is provided in [C Element Syntax Summary] Instructions can select and process other nodes in a source tree. The typical way of processing a source node is to create a sequence of result nodes by finding the applicable template rule and evaluating its content constructor. Note that source nodes are processed only if they are selected by such an instruction. 
Instructions that select nodes from the source document, or that derive information from these nodes for inclusion in the result document, always access the source tree by means of an Expression in the XPath language, described in [XPath 2.0]. A stylesheet written to use XSLT 2.0 will contain expressions whose syntax and semantics are defined by XPath 2.0 (but see also [2.6 Backwards-Compatible Processing] and [2.7 Forwards-Compatible Processing]). Execution of a stylesheet against the principal source document proceeds by creating a document node for the principal result tree, finding the template rule that matches the document node of the source tree, and evaluating the content constructor of this template rule to create the children of the new document node. By the time evaluation of this content constructor is complete, these children will typically each act as the parent of further result nodes, so a complete tree is constructed. It is also possible for the execution of a stylesheet to start at a node in the source document other than the document node, determined by the implementation-specific mechanism for invoking a stylesheet. In this situation, the complete tree remains available for processing by the stylesheet; the only difference is the choice of the node used when applying the first template rule. In the process of finding the applicable template rule, more than one template rule may have a pattern that matches a given node. However, only one template rule will be applied. The method for deciding which template rule to apply is described in [5.4 Conflict Resolution for Template Rules]. A single content constructor by itself has considerable power. It can create structures of arbitrary complexity; it can pull string values out of arbitrary locations in the source tree; and it can generate structures that are repeated according to the occurrence of nodes in the source tree. 
For simple transformations where the structure of the result tree is independent of the structure of the source tree, a stylesheet can often consist of only a single literal result element, containing a content constructor which functions as a template for building the complete result tree. Transformations on XML documents that represent data with a regular and predictable structure (for example, data extracted from a relational database) are often of this kind. XSLT allows a simplified syntax for such stylesheets (see [2.5 Simplified Stylesheet Modules]). When a content constructor is evaluated, the processor keeps track of which nodes are being processed by means of a set of implicit variables referred to collectively as the focus. More specifically, the focus consists of the following five values: The context item is the item currently being processed. An item (see [Data Model]) is either a simple value (such as an integer, date, or string), or a node. If the context item is a node, then it will always be a node in the context document. The initial context node is the same as the context document. If the context item is a node (as distinct from a simple value such as an integer), then it is also referred to as the context node. The context node is not an independent variable, it changes whenever the context item changes. When the context item is a simple value, there is no context node: its value is an empty sequence. The context node is returned by the XPath expression self::node(), and it is used as the starting node for all relative path expressions. The context document is the source document currently being processed. This is initially set to the document node of the principal source document. It changes when instructions such as xsl:apply-templates and xsl:for-each are used to process nodes in a document other than the principal source document. When such an instruction is processing a node, the context document is the document containing that node. 
When such an instruction is processing a simple value (an item that is not a node), the context document is the same as the context document for the content constructor containing the xsl:apply-templates or xsl:for-each instruction. The context document is returned by the XPath expression / (slash), and it is used as the starting node for all absolute path expressions. On completion of an instruction which changes the focus (such as xsl:apply-templates or xsl:for-each), the focus reverts to its previous value. The description above gives an outline of the way the focus works. Detailed rules for the effect of each instruction are given separately with the description of that instruction. In the absence of specific rules, an instruction uses the same focus as its parent instruction. Sometimes the focus is based on a single node rather than a sequence. A singleton focus based on a node N has the context item (and therefore the context node) set to N, the context document set to the document containing N, and the context position and context size both set to 1 (one). As explained in the previous section, an XSLT stylesheet describes a process that constructs a result tree from a source tree. The stylesheet does not describe how the source tree is constructed. Frequently an implementation will operate in conjunction with an XML parser (or more strictly, in the terminology of [XML], an XML processor), to build the source tree from an input XML document. An implementation may also provide an application programming interface allowing the tree to be constructed directly, or allowing it to be supplied in the form of a DOM Document object (see [DOM2]). This is outside the scope of this specification. Users should be aware, however, that since the input to the transformation is a tree conforming to the data model. A frequent requirement is to output the result tree as an XML document (or in other formats such as HTML). This process is referred to as serialization. 
Like parsing, serialization is not part of the transformation process, and it is not required that an XSLT processor should be able to perform serialization. However, for pragmatic reasons, this specification describes a declaration (the xsl:output element, see [18 Serialization]) which allows a stylesheet to specify the desired properties of a serialized output file. Implementations that do not serialize the result tree are allowed to ignore this declaration. Because it is a common requirement to perform a transformation on a document while retaining lexical characteristics such as CDATA section boundaries, entity references, and the like, an appendix to this specification (see [F Representation of Lexical XML Constructs]) describes a way in which these constructs can be represented within the data model by means of elements in a special namespace. If such a representation is chosen, the tree is transformed in the same way as any other tree. The process of constructing such a tree is something that happens before XSLT transformation starts, and the process of interpreting such a tree and reconstituting the lexical representation is part of the serialization process. Neither of these processes is properly within the scope of XSLT transformation, and therefore, this specification places no requirement on an XSLT processor to support this representation of lexical properties. XSLT provides two "hooks" for extending the language, one hook for extending the set of instruction elements used in content constructors and one hook for extending the set of functions used in XPath expressions. These hooks are both based on XML namespaces: see [16 Extensibility and Fallback] for further details. Extension instructions and extension functions defined according to these rules may be provided by the implementor of the XSLT processor, and the implementor may also provide facilities to allow users to create further extension instructions and extension functions. 
This specification defines how extension instructions and extension functions are invoked, but does not define how new extension instructions and extension functions are to be implemented. If a stylesheet contains a static error, the processor must signal the error, and it must not use the stylesheet to produce a result tree. A static error must be signaled even if it occurs in a part of the stylesheet that is never evaluated. There is an exception to this rule when the stylesheet specifies forwards-compatible behavior (see [2.7 Forwards-Compatible Processing]). An error that is not detected until a source document is being transformed is referred to as a dynamic error. In many cases, this specification allows an implementation to decide whether dynamic errors should be signaled (by reporting the error condition and terminating execution) or whether recovery action should be taken. If the implementation does choose to take recovery action, it must take the recovery action defined in this specification; it is also allowed to take other action, such as logging a warning message. Because different implementations may optimize execution of the stylesheet in different ways, the detection of dynamic errors will not necessarily be consistent between one implementation and another. In cases where an implementation is able to produce the result tree without evaluating a particular construct, the implementation is never required to evaluate that construct solely in order to determine whether doing so causes a dynamic error. For example, if a variable is declared but never referenced, an implementation is not required to evaluate the expression that defines the variable's value, and is therefore not required to signal any dynamic error that such evaluation would cause (circular definitions are discussed in [6.3 Circular Definitions]). A type error is normally reported as a dynamic error. An implementation may also, optionally, report a type error as a static error, even if it occurs in part of the stylesheet that is never evaluated, provided it can establish that execution of a particular construct would never succeed. For example, the following construct contains a type error, because 42 is not allowed as an operand of the xsl:apply-templates instruction.
An implementation may optionally report this as a static error, even though the offending instruction will never be evaluated, and the type error would therefore never be reported as a dynamic error. <xsl:if test="$condition"> <xsl:apply-templates select="42"/> </xsl:if> If more than one error arises, an implementation is not required to signal any errors other than the first one that it detects. This applies both to static errors and to dynamic errors. An implementation is allowed to signal more than one error, but if any errors have been signaled, it must not produce a result tree. Everything said above about error handling applies equally to errors in evaluating XSLT instructions, and errors in evaluating XPath expressions. Static errors and dynamic errors may occur in both cases. If a transformation has successfully produced a result tree, this specification imposes no mandatory requirements on the way in which an implementation handles serialization errors: see [18 Serialization]. A stylesheet consists of one or more stylesheet modules, each one forming all or part of a well-formed XML document. There are three kinds of stylesheet module: a standard stylesheet module, which is an XML document having an xsl:stylesheet or xsl:transform element as its document element (see [2.4 Stylesheet Element]); a simplified stylesheet module (see [2.5 Simplified Stylesheet Modules]); and an embedded stylesheet module, which is an xsl:stylesheet or xsl:transform element embedded within another XML document, typically the principal source document (see [2.9 Embedded Stylesheet Modules]). Issue (embedded-simplified-stylesheets): This classification would imply that embedded stylesheet modules cannot be simplified stylesheets. The Working Group does not intend to disallow use of embedded simplified stylesheet modules, and will re-work the text before final publication to permit this combination. The XSLT namespace has the URI http://www.w3.org/1999/XSL/Transform. It is used to identify elements, attributes, and other names that have a special meaning defined in this specification. NOTE: The 1999 in the URI indicates the year in which the URI was allocated by the W3C.
It does not indicate the version of XSLT being used, which is specified by attributes (see [2.4 Stylesheet Element] and [2.5 Simplified Stylesheet Modules]). XSLT processors must use the XML namespaces mechanism [XML Names] to recognize elements and attributes from this namespace. Elements from the XSLT namespace are recognized only in the stylesheet and not in the source document. The complete list of XSLT-defined elements is specified in [C Element Syntax Summary]. Implementations must not extend the XSLT namespace with additional elements or attributes. Instead, any extension must be in a separate namespace. Any namespace that is used for additional instruction elements must be identified by means of the extension instruction mechanism specified in [16.2 Extension Instructions]. An element from the XSLT namespace may have any attribute not from the XSLT namespace, provided that the expanded-name (see [XPath 2.0]) of the attribute has a non-null namespace URI. The presence of such attributes must not change the behavior of XSLT elements and functions defined in this document or in the XPath specification, though they may be used to modify the behavior of extension functions and extension instructions. Thus, an implementation is always free to ignore such attributes, and must ignore such attributes without giving an error if it does not recognize the namespace URI. Such attributes can provide, for example, unique identifiers, optimization hints, or documentation. For example, the following code might be used to provide a hint to a particular implementation that a call to an extension function has side effects: <xsl:value-of select="ext:function()" acme:side-effects="yes"/> [ERR001] It is a static error for an element from the XSLT namespace to have an attribute with an expanded-name that has a null namespace URI (i.e. an attribute with an unprefixed name).
There are a number of standard attributes that may appear on any XSLT element: specifically version, exclude-result-prefixes, extension-element-prefixes, and default-xpath-namespace. When these attributes appear on an element that is not in the XSLT namespace, they are written with the XSLT namespace prefix: xsl:version, xsl:exclude-result-prefixes, xsl:extension-element-prefixes, or xsl:default-xpath-namespace. It is recommended that these attributes should also be permitted on extension instructions, but this is at the discretion of the implementor. Each of these attributes applies to the element on which it appears, and any descendant elements of the element they appear on, together with attributes of those descendant elements. The two forms with and without the XSLT namespace have the same effect; the XSLT namespace is used for the attribute if and only if its parent element is not in the XSLT namespace. In the case of [xsl:]version and [xsl:]default-xpath-namespace the value can be overridden by a different value for the same attribute appearing on a descendant element. The effective value of the attribute for a particular stylesheet element is determined by the innermost containing element on which the attribute appears. In the case of [xsl:]exclude-result-prefixes and [xsl:]extension-element-prefixes the values are cumulative. For these attributes, the value is a whitespace-separated list of namespace prefixes, and the effective value for an element is the combined set of prefixes that appear in this attribute for that element and any of its ancestor elements. Again, the two forms with and without the XSLT namespace are equivalent. Because these attributes may appear on any XSLT element, they are not listed in the syntax summary of each individual element. Instead they are listed and described in the description of the xsl:stylesheet and xsl:transform elements only. This reflects the fact that these attributes are often used on the xsl:stylesheet element. The attributes are described in the following sections: [xsl:]version: see [2.6 Backwards-Compatible Processing] and [2.7 Forwards-Compatible Processing]. [xsl:]default-xpath-namespace: see [4.4 Unprefixed Names in Expressions and Patterns]. [xsl:]exclude-result-prefixes: see [8.1.2 Namespace Nodes for Literal Result Elements].
[xsl:]extension-element-prefixes: see [16.2 Extension Instructions]. <xsl:stylesheet id = id extension-element-prefixes = tokens exclude-result-prefixes = tokens version = number default-xpath-namespace = uri> <!-- Content: (xsl:import*, top-level-elements) --> </xsl:stylesheet> <xsl:transform id = id extension-element-prefixes = tokens exclude-result-prefixes = tokens version = number default-xpath-namespace = uri> <!-- Content: (xsl:import*, top-level-elements) --> </xsl:transform> [ERR002] An xsl:stylesheet element must have a version attribute, indicating the version of XSLT that the stylesheet requires. [ERR003] The value of the version attribute must be a number. For this version of XSLT, the value should normally be 2.0. When the value is less than 2.0, backwards-compatible processing behavior is enabled (see [2.6 Backwards-Compatible Processing]). When the value is greater than 2.0, forwards-compatible behavior is enabled (see [2.7 Forwards-Compatible Processing]). [ERR004] An xsl:stylesheet element must have no text node children, other than text nodes consisting entirely of whitespace. An element occurring as a child of an xsl:stylesheet element is called a top-level element. Top-level elements fall into two categories: declarations, and user-defined data elements. Top-level elements whose names are in the XSLT namespace are declarations. Top-level elements in any other namespace are user-defined data elements (see [2.4.1 User-defined Data Elements]). The xsl:stylesheet element may contain the following types of declaration: xsl:import xsl:include xsl:attribute-set xsl:decimal-format xsl:destination xsl:function xsl:key xsl:namespace-alias xsl:output xsl:param xsl:preserve-space xsl:sort-key xsl:strip-space xsl:template xsl:variable. In addition to declarations, the xsl:stylesheet element may contain any element not from the XSLT namespace, provided that the expanded-name of the element has a non-null namespace URI. Such elements are referred to as user-defined data elements.
[ERR005] It is a static error if the xsl:stylesheet element has a child element having a null namespace URI. The presence of a data element must not change the behavior of XSLT elements and functions defined in this document; for example, it is not permitted for a data element to specify that xsl:apply-templates should use different rules to resolve conflicts. Thus, an implementation is always free to ignore data elements, and must ignore a data element without giving an error if it does not recognize the namespace URI. Data elements can provide, for example, information used by extension instructions or extension functions (see [16 Extensibility and Fallback]), information about what to do with the result tree, information about how to obtain the source tree, optimization hints for the processor, metadata about the stylesheet, structured documentation for the stylesheet. [ERR006] A user-defined data element must not precede an xsl:import element within a stylesheet module. A simplified syntax is allowed for a stylesheet module that consists of only a single template rule for the document node. The stylesheet module may consist of just a literal result element (see [8.1 Literal Result Elements]). Such a stylesheet is equivalent to a standard stylesheet module whose xsl:stylesheet element contains a template rule containing the literal result element;> More formally, a simplified stylesheet module is equivalent to the standard stylesheet module that would be generated by applying the following transformation to the simplified stylesheet module: <xsl:stylesheet <xsl:template <xsl:element <xsl:attribute <xsl:value-of </xsl:attribute> <xsl:element <xsl:attribute/</xsl:attribute> <xsl:copy-of </xsl:element> </xsl:element> </xsl:template> </xsl:stylesheet> [ERR007] A literal result element that is the document element of a simplified stylesheet module must have an xsl:version attribute. This indicates the version of XSLT that the stylesheet requires. 
For this version of XSLT, the value should normally be 2.0; the value must be a NumericLiteral as defined in [XPath 2.0]. Other literal result elements may also have an xsl:version attribute. When the xsl:version attribute is numerically less than 2.0, backwards-compatible processing behavior is enabled (see [2.6 Backwards-Compatible Processing]). When the xsl:version attribute is numerically greater than 2.0, forwards-compatible behavior is enabled (see [2.7 Forwards-Compatible Processing]). The allowed content of a literal result element when used as a simplified stylesheet is the same as when it occurs within a content constructor. Thus, a literal result element used as the document element of a simplified stylesheet cannot contain declarations. An element enables backwards-compatible behavior for itself, its attributes, its descendants and their attributes if either it has an [xsl:]version attribute (see [2: It is constrained to use syntax permitted by XPath 1.0 It is guaranteed to return the same result as would be returned by XPath 1.0, after conversion of any variables that it references to the equivalent XPath 1.0 data type. This conversion is done as follows. Any numeric value is converted to the nearest XPath 1.0 number. Boolean values remain as booleans; any other simple value is converted to a string. [ERR008] If the value is an empty sequence or a sequence that consists entirely of nodes, then it is converted to a node-set; it is a dynamic error if the value is any other sequence of two or more items. The processor must signal the error. The result of the expression is converted to an XPath 2.0 value by representing any node-set as a sequence of nodes in document order. An XSLT 2.0 implementation is not obliged to support backwards-compatible behavior. [ERR009] If an implementation does not support backwards-compatible behavior, then it is a dynamic error if any element is evaluated that enables backwards-compatible behavior. 
The processor must signal the error. An element enables forwards-compatible behavior for itself, its attributes, its descendants and their attributes if it has an [xsl:]version attribute (see [2. Within a section of a stylesheet where forwards-compatible behavior is enabled, errors that would normally be static errors are treated instead as dynamic errors. This means that no error is reported unless the construct containing the error is actually evaluated. This means, for example, that when an element is processed with forwards-compatible behavior: if it is a top-level element and XSLT 2.0 does not allow such elements as top-level elements, then the element must be ignored along with its content; if it is an element in a content constructor and XSLT 2.0 does not allow such elements to occur in content constructors, then if the element is not evaluated, no error must be signaled, and if the element is evaluated, the processor must perform fallback for the element as specified in [16.2.3 Fallback]; if the element has an attribute that XSLT 2.0 does not allow the element to have or if the element has an optional attribute with a value that XSLT 2.0 does not allow the attribute to have, then the attribute must be ignored. if an attribute of the element contains an XPath expression that does not match the allowed syntax of an XPath 2.0 expression, or one that calls a function with an unprefixed name that is not defined in XPath 2.0 or XSLT 2.0, or that calls such a function with the wrong number or type of arguments, the error must not be signaled unless the expression is actually evaluated. 
Thus, any XSLT 2.0 processor must be able to process the following stylesheet without error, although the stylesheet includes elements from the XSLT namespace that are not defined in this specification: <xsl:stylesheet <xsl:template <xsl:choose> <xsl:when <xsl:exciting-new-17.0-feature/> </xsl:when> <xsl:otherwise> <html> <head> <title>XSLT 17.0 required</title> </head> <body> <p>Sorry, this stylesheet requires XSLT 17.0.</p> </body> </html> </xsl:otherwise> </xsl:choose> </xsl:template> </xsl:stylesheet> NOTE: If a stylesheet depends crucially on a declaration introduced by a version of XSLT after 2.0, then the stylesheet can use an xsl:messageelement with terminate="yes"(see [15="system-property('xsl:version') <: <!-- Category: declaration --> <xsl:include href = uri-reference /> A stylesheet module may include another stylesheet module using an xsl:include declaration. The xsl:include declaration has an href attribute whose value is a URI reference identifying the stylesheet module to be included. A relative URI is resolved relative to the base URI of the xsl:include declaration (see [Data Model]). [ERR010] The xsl:include element is allowed only as a top-level element.. The included stylesheet module may be any of the three kinds of stylesheet module: a standard stylesheet module, a simplified stylesheet module, or an embedded stylesheet module. Issue (include-fragment): Is it permitted for the URI reference used in xsl:includeand xsl:importto include a fragment identifier, to reference an embedded stylesheet module? And if so, what is the form of the fragment identifier? This isn't clear at 1.0. [ERR011] It is is the same as including it (see [2.8.1 Stylesheet Inclusion]) except that template rules and other declarations in the importing stylesheet take precedence over template rules and declarations in the imported stylesheet; this is described in more detail below. 
The xsl:import declaration has an href attribute whose value is a URI reference identifying the stylesheet to be imported. A relative URI is resolved relative to the base URI of the xsl:import element (see [Data Model]). [ERR012] The xsl:import declaration is allowed only as a top-level element. [ERR013] order of import precedence (lowest first) is D, B, E, C, A. In general, a declaration with higher import precedence takes precedence over a declaration with lower import precedence. This is defined in detail for each kind of declaration. [ERR014] It is a static error if a stylesheet module directly or indirectly imports itself. NOTE: The case where a stylesheet with a particular URI is imported in multiple places is not treated specially. The resulting stylesheet will contain multiple declarations that are identical in content but that differ in their import precedence. A standard stylesheet module is a complete XML document with the xsl:stylesheet element as its document element. However, a stylesheet module of type ID. The same requirement typically applies if the identifier is to be used as a fragment identifier in a URI reference. The following example shows how the xml-stylesheet processing instruction [XML Stylesheet] can be used to allow a source or that may be included or imported into a stylesheet that is so embedded typically needs to contain a template rule that specifies that xsl:stylesheetelements are to be ignored. NOTE: The above example uses the pseudo-attribute type="text/xml"in the xml-stylesheetprocessing instruction to denote an XSLT stylesheet. This usage was defined provisionally in XSLT 1.0, and is subject to change. In the absence of a registered media type for XSLT stylesheets, some vendors' products have adopted different conventions, notably type="text/xsl". NOTE: Support for the xml-stylesheetprocessing instruction is not a requirement for conformance with this Recommendation. 
The data model used by XSLT is as defined in [Data Model], with the additions described in this section. XSLT operates on source, result and stylesheet documents using the same data model. Features of a source XML document that are not represented in the tree defined by the data model will have no effect on the operation of an XSLT stylesheet. Examples of such features are entity references, CDATA sections, character references, whitespace within element tags, and the choice of single or double quotes around attribute values.. Ed. Note: This section can be removed when it is confirmed that the data model permits "well-balanced" trees. At the time of writing, this is still an open issue in the data model (Issue 0041). The normal restrictions on the children of the document node are relaxed for the result tree and for temporary trees constructed during the evaluation of the stylesheet. The document node of such a tree may have any sequence of nodes as children that would be possible for an element node. In particular, it may have text node children, and any number of element node children. When written out using the XML output method (see [18 Serialization]), it is possible that a result tree will not be a well-formed XML document; however, it will always be a well-formed external general parsed entity. For example, a stylesheet might produce the following output. This is a well-formed external general parsed entity, but it is not a well-formed XML document: <?xml version="1.0" encoding="iso-8859-1"?>A <i>fine</i> mess! When a source tree is created by parsing a well-formed XML document, the document node of the source tree will automatically satisfy the normal restrictions of having no text node children and exactly one element child. When a source tree is created in some other way, for example by using the DOM, the usual restrictions are relaxed for the source tree as for the result tree. Ed. 
Note: Unparsed entities don't currently appear in the data model, though we have asked for them to be added. This section can be deleted when the Data Model is updated to support unparsed entities. The document node has a mapping that gives the URI for each unparsed entity declared in the document's DTD. The URI is generated from the system identifier and public identifier specified in the entity declaration. The processor may use the public identifier to generate a URI for the entity instead of the URI specified in the system identifier. If the]. Issue (whitespace-and-schema): If an element has element content, as defined in the schema or DTD, the default should be to strip whitespace nodes rather than preserving them. The source document supplied as input to the transformation process may contain whitespace nodes (that is, text nodes consisting solely of whitespace characters) that are of no interest, and that do not need to be retained by the transformation. Conceptually, such whitespace nodes may be removed from the tree before the transformation commences. This process is referred to as whitespace stripping. The source tree itself must not be modified: the processor may implement whitespace stripping either by creating a copy of the tree from which the whitespace nodes have been removed, or by working on a virtual tree in which the whitespace nodes are treated as if they were absent. The stripping process takes as input a set of element names whose child whitespace nodes must be preserved. The stripping process is applied to both stylesheets and source documents, but the set of whitespace-preserving element names is determined differently for stylesheets and for source documents. NOTE: Where multiple transformations are to be applied to the same source document, a useful optimization is to do the whitespace stripping only once. 
Implementations may therefore allow whitespace stripping to be controlled as a separate operation from the rest of the transformation process.. The xml:space attributes are not removed from the tree. NOTE: This implies that if an xml:spaceattribute is specified on a literal result element, it will be included in the result. For stylesheets, the set of whitespace-preserving element names consists of just xsl:text. Processing instructions and comments in a stylesheet module are ignored: the stylesheet module is treated as if the processing instructions and comments were not there. This also means that sibling text nodes that are separated by a processing instruction or comment in a stylesheet module are concatenated into a single text node; and a text node is classified as a whitespace text node for the purpose of whitespace stripping only after this concatenation has taken place. The content model for some XSLT elements (for example xsl:stylesheet and xsl:choose) does not permit text nodes as children of these elements. If the xml:space="preserve" attribute is used to suppress the stripping of whitespace text nodes within such elements, then any whitespace used for the layout of such elements will be retained in the stylesheet tree in the form of whitespace text nodes. Such text nodes should not be reported as an error. [ERR015] Within an XSLT element that is required to be empty, any content other than comments or processing instructions, including any whitespace-only text node preserved using the xml:space="preserve" attribute, is a static error. <!-- Category: declaration --> <xsl:strip-space elements = tokens /> <!-- Category: declaration --> <xsl:preserve-space elements = tokens /> For source documents, the set of whitespace-preserving element names is specified by xsl:strip-space and xsl:preserve-space declarations. 
Whether an element name is included in the set of whitespace-preserving names is determined by the best match amongst the xsl:strip-space and xsl:preserve-space elements; an element name matches an xsl:strip-space or xsl:preserve-space element if it matches one of the NameTests. An element matches a NameTest if and only if the NameTest would be true for the element as an XPath node test. When more than one xsl:strip-space and xsl:preserve-space element matches, the best matching element is determined by the best matching NameTest. This is determined in the same way as with template rules: First, any match with lower import precedence than another match is ignored. Next, any match that has a lower default priority than the default priority of another match is ignored. [ERR016] It is a dynamic error if this leaves more than one match. The processor must either signal the error, or must recover by choosing, from amongst the matches that are left, the one that occurs last in declaration order. NOTE: A source document is supplied as input to the XSLT processor in the form of a tree. Nothing in this specification states that this tree must be built by parsing an XML document; nor does it state that the application that constructs the tree is required to treat whitespace in any particular way. The provisions in this section relate only to whitespace text nodes that are present in the tree supplied as input to the processor. In particular, the processor cannot preserve whitespace text nodes unless they were actually present in the supplied tree. Ed. Note: The process of namespace fixup would ideally be described along with the node construction functions defined in the XPath 2.0 data model. Issue (shared-namespace-node-fixup): This section needs to be revised if namespace nodes are to be held at document level. 
In a tree constructed by parsing an XML document, the following constraints relating to namespace nodes will be satisfied: If an element node has an expanded-name with a non-null namespace URI, then that element node will have at least one namespace node whose string-value is the same as that namespace URI. If an attribute node has an expanded-name with a non-null namespace URI, then the parent element of that attribute will have at least one namespace node whose string-value is the same as that namespace URI and whose expanded-name has a non-empty local part. If an element node has a namespace node with an expanded-name with a non-empty local part, then every child element of that element will also have a namespace node with that expanded-name (possibly with a different string-value). Every element has a namespace node whose expanded-name has local-part xml and whose string-value is http://www.w3.org/XML/1998/namespace. However, when a tree is being constructed as the result of an XSLT transformation, these constraints might not be satisfied unless special action is taken. In particular, since xsl:element and xsl:attribute instructions do not create namespace nodes, they will often cause these constraints not to be satisfied. The process of namespace fixup modifies a tree by adding namespace nodes so that it satisfies all constraints affecting namespace nodes. What namespace nodes are added and where they are added by namespace fixup is implementation-dependent, provided that the resulting tree satisfies the constraints and provided that all namespace nodes in the resulting tree are allowable, where a namespace node is allowable for an element E if any of the following conditions applies: The namespace node was in the tree before namespace fixup. The local-part of the expanded-name of the namespace node is xml and its string-value is http://www.w3.org/XML/1998/namespace. The namespace node has a string-value equal to the namespace URI of the expanded-name of element E. 
The namespace node has a string-value equal to the namespace URI of the expanded-name of an attribute of element E; this applies only if the local part of the expanded-name of the namespace node is non-empty. Element E has a parent element with a namespace node that is allowable and that has the same expanded-name and same string-value as the other namespace node; this applies only if the local part of the expanded-name of the namespace node is non-empty. Namespace fixup must not result in an element having multiple namespace nodes with the same expanded-name. Namespace fixup is performed in two situations: It is applied to a result tree, before the result tree is made available to the calling application (whether by serialization or otherwise: see [18 Serialization]). It is applied to a temporary tree, before the temporary tree is made available for processing by stylesheet instructions. (see [6.1 Values of Variables and Parameters]). There is no requirement to perform namespace fixup for the principal source document, nor for any document loaded using the document function, nor for any document supplied as the value of a global parameter, nor for any document returned by an extension function. [ERR017] It is a dynamic error if such a document does not already satisfy the constraints listed above . The processor may signal the error, or may recover by performing namespace fixup, or may produce implementation-defined results. If an implementation supports the disable-output-escaping attribute of xsl:text and xsl:value-of (see [18.5 Disabling Output Escaping]), then the data model for trees constructed by the processor is augmented with a boolean value representing the value of this property. Conceptually, each character in a text node on a result tree has a boolean property indicating whether the serializer should disable the normal rules for escaping of special characters (for example, outputting of & as &) in respect of this character. 
This property is preserved when a text node is copied using xsl:copy or xsl:copy-of. NOTE: There are many ways an implementation can avoid the overhead of actually storing a boolean flag with every character. The name of an internal XSLT object, specifically a named template (see [7.1 Named Templates]), a mode (see [5.6 Modes]), an attribute set (see [7.2 Named Attribute Sets]), a key (see [14.3 Keys]), a named sort specification (see [12.3 Using Named Sort Specifications]), a decimal-format (see [14.4 Number Formatting]), a variable or parameter (see [6 Variables and Parameters]), a stylesheet function (see [7.3 Stylesheet Functions]), or a named output definition (see [18 Serialization]), is specified as a QName. A QName is always written in the form NCName (":" NCName)?, that is, a local name optionally qualified by a namespace prefix. When two QNames are compared, however, they are considered equal if the corresponding expanded QNames are the same. An expanded QName is a pair of values containing a namespace URI and a local name. A QName is expanded by replacing the namespace prefix with the corresponding namespace URI, from the namespace declarations that are in scope at the point where the QName is written. Two expanded QNames are equal if the namespace URIs are the same and the local names are the same. QNames always occur either. Issue (leading-colon-in-qname): The current XPath grammar allows a QName to contain a leading colon. This leading colon is not considered part of the QName as far as XSLT is concerned, and is not permitted in contexts other than an XPath expression. (as defined by a namespace declaration of the form xmlns="some.uri") is not used for unprefixed names. 
In the case of an unprefixed QName used as a NameTest within an XPath expression (see [4.2 Expressions]) or within a pattern (see [4.3 Patterns]), the namespace to be used in expanding the QName may be specified by means of the [xsl:]default-xpath-namespace attribute, as specified in [4.4 Unprefixed Names in Expressions and Patterns]. [ERR018] In the case of a QName used as the value of an attribute in the stylesheet, or appearing within the text of an XPath expression in the stylesheet, it is a static error if the defining element has no namespace node whose name matches the prefix of the QName. [ERR019] In the case of a QName produced by evaluating an XPath expression, it is a dynamic error if the defining element has no namespace node whose name matches the prefix of the QName. The error is a dynamic error even if the value of the expression is known statically, for example if the QName is written as a string literal. The required action depends on the defining element. XSLT uses the expression language defined by XPath 2.0 [XPath 2.0]. Expressions are used in XSLT for a variety of purposes including: An expression must match the XPath production Expr. An XPath expression may occur as the value of certain attributes on XSLT-defined elements, and also within curly braces in attribute value templates. [ERR020] It is a static error if the value of such an attribute, or the text between curly braces in an attribute value template, does not match the XPath production Expr, or if it fails to satisfy other static constraints defined in the XPath specification, for example that all variable references must refer to variables that are in scope. The context within a stylesheet where an XPath expression may appear determines the required type of the expression. The required type indicates the data type of value that the expression is expected to return. 
[ERR021] It is a type error if an XPath expression contains a type error, or if the type of the XPath expression is incompatible with the required type. The processor must either signal a type error as a static error, or must attempt to recover by converting the result of the expression to the required type using the standard type conversion rules; if conversion is not possible under these rules, the processor must signal a dynamic error Issue (type-compatibility): We need to provide a more rigorous definition of what it means for the supplied value to be compatible with the required type. The context for evaluation of an XPath expression is determined according to the following rules. The context has two parts: the static context, and the dynamic expression evaluation context. The static context depends on the element in the stylesheet that contains the attribute holding the XPath expression ("the containing element") as follows: The type exception policy is, by default, flexible, meaning that the system attempts to convert supplied values to the required type when possible. An implementation may provide the alternative policy, strict, as a user-selectable option. The in-scope namespaces are the namespace declarations that are in scope for the containing element. The default namespace for element names is the namespace defined by the innermost [xsl:]default-xpath-namespace attribute, as described in [4.4 Unprefixed Names in Expressions and Patterns]. The default namespace for function names is the namespace, defined in [Functions and Operators]. This means that it is not necessary to declare this namespace in the stylesheet, nor to use the prefix xf used in the specification of the core functions. Ed. Note: The current draft of the Functions & Operators document includes an issue suggesting this namespace may change. 
The in-scope type definitions includes the built-in types of XML Schema (see [XML Schema]), plus any types imported using implementation-defined mechanisms. The in-scope variables are the variable-bindings that are in scope for the containing element (see [6 Variables and Parameters]). The in-scope functions are the core functions defined by XPath, the additional functions defined in this specification, the stylesheet functions defined in the stylesheet, plus any extension functions bound using implementation-defined mechanisms (see [16 Extensibility and Fallback]). [ERR022] It is a dynamic error for an expression to call any function that is not included in the in-scope functions. The processor must signal the error, but only if the function call is actually evaluated. The in-scope collations are implementation-defined. Issue (stylesheet-defined-collations): Should the stylesheet define names of collations? If so, how are they to be described? Should we encourage portability by providing some indirection between the collation name and the underlying collation? But if this is to aid portability, there needs to be a way of selecting different mappings based on the XSLT implementation. The default collation is implementation-defined. The base URI is the base URI of the containing element. The evaluation context, which includes the focus, is determined as follows: Where the containing element is an instruction or a literal result element, the focus is established as follows. In other cases, the rules are given for the specific containing element. A template rule identifies the nodes to which it applies by means of a pattern. As well as being used in template rules, patterns are used for numbering (see [9 Numbering]), for grouping (see [13 Grouping]), and for declaring keys (see [14.3 Keys]). evaluating this expression with respect to some possible context. 
Here are some examples of patterns: para matches any para element * matches any element chapter|appendix matches any chapter element and any appendix element olist/item matches any item element with an olist parent appendix//para matches any para element with an appendix ancestor element / matches the document node of any source document text() matches any text node node() matches any node other than an attribute node, namespace node, or document node id("W11") matches the element with unique ID W11 para[1] matches any para element that is the first para child element of its parent item[position() mod 2 = 1] matches node [ERR023]StepExpr that uses only the child or attribute axes. Patterns may also use the // operator, and they may start with an id or key function call provided its arguments are string literals. Predicates in a pattern (the construct enclosed between square brackets) can contain arbitrary XPath expressions in the same way as predicates in a path expression. If a pattern occurs in part of the stylesheet where backwards compatible behavior is enabled (see [2.6 Backwards-Compatible Processing]), then the pattern is restricted to use the syntax for patterns defined in XSLT 1.0, and will match a node if and only if it would have matched that node under the rules defined in XSLT 1.0. The constructs NodeTest, StringLiteral, and Expr are part of the XPath expression language, and are defined in [XPath 2.0]. The meaning of a pattern is defined formally as follows. To determine whether a node N matches a pattern PAT, evaluate the expression //(PAT) with a singleton focus based on N. If the result is a sequence of nodes that includes N, then node N matches the pattern; otherwise node N does not match the pattern. This expression is constructed by textually inserting the pattern PAT exactly as written in the stylesheet. For example, p matches any p element, because a p element will always be present in the result of evaluating the expression //(p). 
Similarly, / matches a document node, and only a document node, because the result of the expression //(/) when applied using a particular document as context document returns only the document node of that document. NOTE:can be examined from right to left. A node will only match this pattern if it is a sectionelement; and then, only if its parent is a chapter; and then, only if the parent of that chapteris a book. When the pattern uses the //operator, one can still read it from right to left, but this time testing the ancestors of a node rather than its parent. For example appendix//sectionmatches every sectionelement that has an ancestor appendixelement. The formal definition, however, is useful for understanding the meaning of a pattern such as item[1]. This matches any node selected by the expression //(item[1]): that is, any itemelement that is the first itemchild of its parent. The pattern node() matches all nodes selected by the expression //(node()), that is, all element, text, comment, and processing instruction nodes. It does not match attribute or namespace nodes because the expression does not select nodes using the attribute or namespace axes. NOTE: An implementation, of course, may use any algorithm it wishes for evaluating patterns, so long as the result corresponds with the formal definition above. An implementation that followed the formal semantics by evaluating the equivalent expression and then testing the membership of a specific node in the result would probably be very inefficient. The attribute [xsl:]default-xpath-namespace (see [2.3 Standard Attributes]) may be used on an element in the stylesheet to define the namespace URI that will be used for an unprefixed name used as a NameTest within a step of an XPath PathExpression or an XSLT Pattern occurring in an attribute of that stylesheet element or an attribute of a descendant of that stylesheet element. 
This default namespace URI applies only to a NameTest applied to an axis whose principal node type is elements: it does not apply when the step is using the attribute or namespace axis. The default namespace URI for such a name is the value of the [xsl:]default-xpath-namespace attribute on the innermost ancestor element that has such an attribute, considering all ancestor elements of the attribute containing the XPath expression or XSLT pattern. The [xsl:]default-xpath-namespace attribute must be in the XSLT namespace only if its parent element is not in the XSLT namespace. In the absence of this attribute, an unqualified NameTest (that is, a NameTest that is an NCName) matches an expanded QName whose namespace URI is null: the default namespace (as defined by an xmlns="some-uri" declaration) is not used. The default-xpath-namespace only affects unqualified names (names containing no colon) used in a NameTest. It does not affect other names, for example function names, variable names, or names used as arguments to the key or system-property functions. Issue (runtime-namespace-selection): The default-xpath-namespace facility as proposed here doesn't meet the requirement to match multiple namespaces, or to decide at run-time which namespace to match - as exemplified by the XHTML scenario. Ed. Note: Do we need to add this attribute to all element proformas and to the DTD? In an attribute that is designated as an attribute value template, such as an attribute of a literal result element, an expression can be used by surrounding the expression with curly braces ( {}). An attribute value template consists of an alternating sequence of fixed parts and variable parts. A variable part consists of an XPath expression enclosed in curly braces ( {}). A fixed part may contain any characters, except that a left curly brace must be written as {{ and a right curly brace must be written as }}. 
NOTE: An expression within a variable part may contain an unescaped curly brace within a StringLiteral. [ERR024] It is a static error if a left curly brace appears in an attribute value template without a matching right curly brace. [ERR025] It is a static error if the string contained between matching curly braces in an attribute value template does not match the XPath production Expr. [ERR026] It is a static error if a right curly brace occurs in an attribute value template outside an expression without being followed by a second right curly brace. A right curly brace inside a StringLiteral in an expression is not recognized as terminating the expression. The required type of each expression within an attribute value template is xsd:string.
http://www.w3.org/TR/2001/WD-xslt20-20011220/
crawl-001
refinedweb
9,817
52.09
Introduction IPython provides a rich architecture for interactive computing with [1]: - A powerful interactive shell. - A kernel for Jupyter. - Support for interactive data visualization and use of GUI toolkits. - Flexible, embeddable interpreters to load into your own projects. - Easy to use, high performance tools for parallel computing. Beyond this point in the article, all of the headings, body, code, and output are copy and pasted from an iPython notebook, saved in html format. Moreover, all of the code was execute from the iPython notebook Logistic Regression This is an example of performing logistic regression in Python with the Scikit-learn module. In this example, we perform many useful python functions beyond what we need for a simple model. This is done partially to explore some more advanced modeling, array manipulation, evaluation, and so on. We will also use several external modules importing data, formatting data, manipulating data, modeling and graphical exploration. Dataset The dataset I chose for this example in Longitudinal Low Birth Weight Study (CLSLOWBWT.DAT). Hosmer and Lemeshow (2000) Applied Logistic Regression: Second Edition. These data are copyrighted by John Wiley & Sons Inc. and must be acknowledged and used accordingly. List of Variables Variable Description Codes/Values Name 1 Identification Code ID Number ID 2 Birth Number 1-4 BIRTH 3 Smoking Status 0 = No, 1 = Yes SMOKE During Pregnancy 4 Race 1 = White, 2 = Black RACE 3 = Other 5 Age of Mother Years AGE 6 Weight of Mother at Pounds LWT Last Menstrual Period 7 Birth Weight Grams BWT 8 Low Birth Weight 1 = BWT <=2500g, LOW 0 = BWT >2500g Problem Statement In this example, we want to predict Low Birth Weight using the remaining dataset variables. Low Birth Weight, the dependent variable, 1 = BWT <=2500g and 0 = BWT >2500g. 
Import Modules import numpy as np import pandas as pd import statsmodels.api as sm import matplotlib.pyplot as plt from patsy import dmatrices from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import train_test_split from sklearn import metrics from sklearn.cross_validation import cross_val_score Data Pre-Processing # load dataset dta = pd.read_csv("C:\Users\Strickland\Documents\Python Scripts\CLSLOWBWT.csv") Data Exploration dta.groupby('LOW').mean() We can see that on average, women who have children with a low birth weight are more likely to be smokers than nonsmokers. This makes sense based on scientific studies. Let’s take another look at the Birth Number variable. dta.groupby('BIRTH').mean() Low Birth Weight trends upward with more births. Data Visualization # show plots in the notebook %matplotlib inline # histogram of birth number dta.BIRTH.hist() plt.title('Histogram of Low Birth Weight') plt.xlabel('Birth Number') plt.ylabel('Frequency') # histogram of age of mother dta.AGE.hist() plt.title('Histogram of Age of Mother') plt.xlabel('Age') plt.ylabel('Frequency') Let’s take a look at the distribution of smokers for those having children with low birth weights versus those who do not. # barplot of low birth weights grouped by smoker status (True or False) pd.crosstab(dta.SMOKE, dta.LOW.astype(bool)).plot(kind='bar') plt.title('Somker Distribution by Low Birth Weight') plt.xlabel('Smoker') plt.ylabel('Frequency') <matplotlib.text.Text at 0x19b084e0> Now let’s use a stacked barplot to look at the percentage of women having children with low birth weights by age. 
low_age = pd.crosstab(dta.AGE, dta.LOW.astype(bool)) low_age.div(low_age.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True) plt.title('Low Birth Weight Percentage by Age of Mother') plt.xlabel('Age of Mother') plt.ylabel('Percentage') Prepare Data for Logistic Regression To prepare the data, we want to add an intercept column as well as dummy variables for Age of Mother and Weight of Mother at Last Menstrual Period, since we are treating them as categorical variables. The dmatrices function from the patsy module can do that using formula language. The column names for the dummy variables are messy, so let’s rename those. # create dataframes with an intercept column and dummy variables for y, X = dmatrices('LOW ~ ID + BIRTH + SMOKE + RACE + AGE + LWT', dta, return_type="dataframe") print X.columns Index([u'Intercept', u'ID', u'BIRTH', u'SMOKE', u'RACE', u'AGE', u'LWT'], dtype='object') We now want to convert the numeric (interval) variable AGE to a categorical variable with 4 classes. We also need to flatten y into a 1-D array, so that Scikit-learn will properly understand it as the response variable. # flatten y into a 1-D array y = np.ravel(y) First Logistic Regression Let’s go ahead and run logistic regression on the entire data set, and see how accurate it is. # instantiate a logistic regression model, and fit with X and y model = LogisticRegression() model = model.fit(X, y) # check the accuracy on the training set model.score(X, y) 0.70696721311475408 70.7% accuracy seems good, but what’s the null error rate? # what percentage had low birth weights? y.mean() 0.3094262295081967(X.columns, np.transpose(model.coef_))) Converting Variables & Merging Dataframes Now, we want to take the variable AGE and convert it to a categorical variable, to see if we can improve the model. We will do this by creating two data frames from our original data. We will then merge the two data frames, so both need to contain the ID variable. 
One data from will have the converted categorical variable age_group, and the other will have the dependent variable and the other independent variables. df1 = pd.DataFrame(dta, columns=['ID','AGE']) df2 = pd.DataFrame(dta, columns=['ID', 'BIRTH', 'SMOKE', 'RACE', 'LWT', 'LOW']) bins = [15, 25, 35, 45, 55] group_names = ['15-24', '25-34', '35-44', '45-55'] age_groups = pd.cut(df1['AGE'], bins, labels=group_names) df1['age_groups'] = pd.cut(df1['AGE'], bins, labels=group_names) categories df1.head(5) We now merge the two data frames. left = df2 right = df1 result = pd.merge(left, right, on='ID') result.head(5) Second Logistic Regression We are now ready to build and evaluate our second logistic regression model, using the merged data frames. y, Z = dmatrices('LOW ~ BIRTH + SMOKE + RACE + age_groups + LWT', result, return_type="dataframe") print Z.columns Index([u'Intercept', u'age_groups[T.25-34]', u'age_groups[T.35-44]', u'age_groups[T.45-55]', u'BIRTH', u'SMOKE', u'RACE', u'LWT'], dtype='object') Since we change the size of y when we converted AGE and merged, we also need to flatten y again into a 1-D array, so that Scikit-learn will properly understand it as the response variable # flatten y into a 1-D array y = np.ravel(y) Before we perform the logistic regression, we want to check the matrix we just formed to ensure it is consistent with our intent. Z.head(5) Finally, we are ready to execute the logistic regression model and see how accurate it is. # instantiate a logistic regression model, and fit with X and y model1 = LogisticRegression() model1 = model1.fit(Z, y) # check the accuracy on the training set model1.score(Z, y) 0.71320754716981127 71.3% accuracy seems good, but what’s the null error rate? We also want to recheck the percentage of low birth weights. # what percentage had low birth weights? 
y.mean() 0.30867924528301888 Still,(Z.columns, np.transpose(model.coef_))) Increases in Birth Number and RACE correspond to a decrease in the likelihood of having a Low Birth Weight child. A decrease in Smoking Status corresponds to a decrease in the likelihood of having a Low Birth Weight child. For Age Group, the lowest likelihood of having Low Birth Weight child corresponds to the baseline age group (15-24), since all of the dummy coefficients are positive. Model Evaluation Using a Validation Set So far, we have trained and tested on the same set. Let’s instead split the data into a training set and a testing set. # evaluate the model by splitting into train and test sets Z_train, Z_test, y_train, y_test = train_test_split(Z, y, test_size=0.3, random_state=0) model2 = LogisticRegression() model2.fit(Z_train, y_train) LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0) We now need to predict class labels for the test set. We will also generate the class probabilities, just to take a look. # predict class labels for the test set predicted = model2.predict(Z_test) print predicted [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 
0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 1. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 1. 1. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] # generate class probabilities probs = model2.predict_proba(Z_test) print probs [[ 0.70610757 0.29389243] [ 0.82942743 0.17057257] [ 0.78996271 0.21003729] [ 0.71714883 0.28285117] [ 0.82860543 0.17139457] [ 0.66919892 0.33080108] [ 0.80411336 0.19588664] [ 0.82942743 0.17057257] [ 0.80489925 0.19510075] . . . [ 0.59655861 0.40344139] [ 0.78276183 0.21723817] [ 0.71000202 0.28999798]] As you can see, the classifier is predicting a 1 (having a Low Birth Weight child) any time the probability in the second column is greater than 0.5. Now let’s generate some evaluation metrics. # generate evaluation metrics print metrics.accuracy_score(y_test, predicted) print metrics.roc_auc_score(y_test, probs[:, 1]) 0.678391959799 0.678443267259 The accuracy is 67.8%, which is the close to what we experienced when training and predicting on the same data. We can also see the confusion matrix and a classification report with other metrics. print metrics.confusion_matrix(y_test, predicted) print metrics.classification_report(y_test, predicted) [[239 27] [101 31]] precision recall f1-score support 0.0 0.70 0.90 0.79 266 1.0 0.53 0.23 0.33 132 avg / total 0.65 0.68 0.64 398 Model Evaluation Using Cross-Validation Now let’s try 10-fold cross-validation, to see if the accuracy holds up more rigorously. 
# evaluate the model using 10-fold cross-validation scores = cross_val_score(LogisticRegression(), Z, y, scoring='accuracy', cv=10) print scores print scores.mean() [ 0.53383459 0.7518797 0.69924812 0.68421053 0.64661654 0.80451128 0.73484848 0.66666667 0.61363636 0.71755725] 0.685300951894 Looks good. It’s still performing at 69% accuracy. Predicting the Probability of Low Birth Weight Child Just for fun, let’s predict the probability of a low birth weight child for a random woman not present in the dataset. She’s a 35-year-old Other race, has had 2 births (has 2 children), is a smoker, and her weight is 132. model.predict_proba(np.array([0, 0, 1, 1, 3, 2, 1])) array([[ 0.60709037, 0.39290963]]) The predicted probability of low birth weight child is 39.3% Next Steps There are many different steps that could be tried in order to improve the model: • including interaction terms • removing features • regularization techniques • using a non-linear model References - IPython Home Dr.Strickland: This is the kind of post that needs to be read and re-read. TYVM. If you the re-blog button, I would love to re-blog this if you dont mind. A OOOPS! THIS IS WHAT I MEANT! 🙂 Dr.Strickland: This is the kind of post that needs to be read and re-read. TYVM. If you ADD the re-blog button, I would love to re-blog this AND FUTURE POSTS ON PYTHON and the like, if you don’t mind.
https://bicorner.com/2015/10/22/logistic-regression-with-ipython/
CC-MAIN-2020-50
refinedweb
2,263
62.78
Double Y axes graph The frame parameter of the plotting methods of the pygmt.Figure class can control which axes should be plotted and optionally show annotations, tick marks, and gridlines. By default, all 4 axes are plotted, along with annotations and tick marks (denoted W, S, E, N). Lower case versions (w, s, e, n) can be used to denote to only plot the axes with tick marks. We can also only plot the axes without annotations and tick marks using l (left axis), r (right axis), t (top axis), b (bottom axis). When frame is used to change the frame settings, any axes that are not defined using one of these three options are not drawn. To plot a double Y-axes graph using PyGMT, we need to plot at least two base maps separately. The base maps should share the same projection parameter and x-axis limits, but different y-axis limits. Out: <IPython.core.display.Image object> import numpy as np import pygmt # Generate two sample Y data from one common X data x = np.linspace(1.0, 9.0, num=9) y1 = x y2 = x ** 2 + 110 fig = pygmt.Figure() # Plot the common X axes # The bottom axis (S) is plotted with annotations and tick marks # The top axis (t) is plotted without annotations and tick marks # The left and right axes are not drawn fig.basemap(region=[0, 10, 0, 10], projection="X15c/15c", frame=["St", "xaf+lx"]) # Plot the Y axis for y1 data # The left axis (W) is plotted with blue annotations, ticks, and label with pygmt.config( MAP_FRAME_PEN="blue", MAP_TICK_PEN="blue", FONT_ANNOT_PRIMARY="blue", FONT_LABEL="blue", ): fig.basemap(frame=["W", "yaf+ly1"]) # Plot the line for y1 data fig.plot(x=x, y=y1, pen="1p,blue") # Plot points for y1 data fig.plot(x=x, y=y1, style="c0.2c", color="blue", label="y1") # Plot the Y axis for y2 data # The right axis (E) is plotted with red annotations, ticks, and label with pygmt.config( MAP_FRAME_PEN="red", MAP_TICK_PEN="red", FONT_ANNOT_PRIMARY="red", FONT_LABEL="red", ): fig.basemap(region=[0, 10, 100, 200], frame=["E", "yaf+ly2"]) # Plot 
the line for y2 data fig.plot(x=x, y=y2, pen="1p,red") # Plot points for y2 data fig.plot(x=x, y=y2, style="s0.28c", color="red", label="y2") # Create a legend in the top-left corner of the plot fig.legend(position="jTL+o0.1c", box=True) fig.show() Total running time of the script: ( 0 minutes 2.284 seconds) Gallery generated by Sphinx-Gallery
https://www.pygmt.org/v0.5.0/gallery/basemaps/double_y_axes.html
CC-MAIN-2022-27
refinedweb
424
65.83
19 January 2009 16:51 [Source: ICIS news] By Nigel Davis ?xml:namespace> Nothing has changed for the better over the past few weeks. And since the turn of the year it has become increasingly apparent that the lack of downstream demand is sucking the lifeblood out of chemicals. Manufacturing plants across so many industries are being run at greatly reduced rates. Chemicals makers are trying to turn three and four day weeks in downstream industries into reduced operating rates at the plant. BASF’s warning is stark in that it highlights the fact that demand is depressed for all the company’s businesses apart from products for crop protection and food. That, in effect, means that businesses across the entire chemicals chain are depressed. BASF can resort to short-time working and avoid lay-offs so it has flexibility on how and where it cuts back. And it is clear from its latest statement that it can move fast again if it needs to cut back further. Restructuring and efficiency plans are being accelerated as they are at other companies. Sites are being closed in North America and December was dire as other chemicals firms have acknowledged. BASF’s operating rate globally is currently less than 75%. In November, when the company first announced its slew of production cutbacks, operating rates were said to be depressed by between 20% and 25%. So the situation has deteriorated. “The situation remains tough and difficult to predict,” said BASF chairman and CEO Jurgen Hambrecht. “We do not expect the economic environment top improve in the coming months.” The hardest hit businesses for BASF are those selling into the automotive sector but chemicals for construction and other areas are affected. Analysts except a 30% fall in sales from basic chemicals and plastics. That is not surprising given the current state of business outlined on Friday (16 January) by the American Chemistry Council (ACC) in its latest “business of chemistry” report. 
This weekly update has made sober reading for the past three months. “The individual product reports indicate a sharp downturn in the ending months of 2008,” the ACC’s Emily Sanchez said. “Indeed, the production for chemicals excluding pharmaceuticals dropped at an alarming pace in December and for many basic chemical segments 4th quarter volumes were off over 20% on year-earlier comparisons. The railcar loadings data confirm this weakness extending into January.” It is the extended weakness that gives greatest cause for concern. There are few signs of an upturn, if any. Where re-stocking may have been expected, demand is at such a low level that producers are seeking all available storage. LyondellBasell met its creditors on Friday and the presentation it made to them was bleak to say the least. It was filed with the US Securities and Exchange Commission (SEC). The world’s largest polypropylene producer clearly had problems in the fourth quarter with hurricane-related outages exacerbating a difficult situation. It was forced to seek Chapter 11 bankruptcy protection for its US operations. The near-term story has been one of a significant decline in orders and an extreme decline in profitability. LyondellBasell stressed the current unpredictability of demand and pricing. Its overall operating rate in the fourth quarter was as low as 50% and it says there are “no definitive indications of reversal” in the demand environment. Polymer prices collapsed in November and December. LyondellBasell says that its polymer margins in the EU in the quarter were “significantly negative”. The change in prices over the quarter gives some idea of just how bad the pricing environment became. WTI crude was down 60% and northwest Europe naphtha down 70%; BASF may be well out of the business of making polyethylene and polypropylene but its latest warning shows that it has not been immune from the global collapse in chemicals demand. Capacity utilisation in chemicals fell to 68.8% in De
http://www.icis.com/Articles/2009/01/19/9186055/insight-demand-remains-depressed-difficult-to-predict.html
CC-MAIN-2014-42
refinedweb
650
54.52
hi, i have the following problem:: i want to read a string from the stdin. The string may contain white spaces so {i think} the scanf family of functions is out of the question... I think the most appropriate function for this case is fgets {if i am wrong please correct me}. The problem with fgets is that it stops when it reads an enter in the input stream and a problem arises when i use a menu prior to reading a string that causes an enter to be left in the stream... Is there anyway to circumvent this problem? The only way i found is if i call fgets 2 times in a row{so that the first will discart the enter , and the second will read the string} here is a sample code:: #include <stdio.h> int Menu() { int choice = 0; /* menu choice value */ /* with the following loop we force the user to choose * on of the 3 valid answers: 1, 2, or 3 */ do { fprintf(stdout, "Please choose one of the following:\n" "1. do1 \n" "2. do2 \n" "3. do3 \n"); fscanf(stdin, "%d", &choice); } while ( choice != 1 && choice != 2 && choice != 3); return choice; } int main() { int mychoice = Menu(); printf("\n\n\tyour choice was:: %d\n", mychoice); printf("\nnow lets read a string with fgets\n", mychoice); char buffer[256]; fgets(buffer, sizeof(buffer), stdin); fgets(buffer, sizeof(buffer), stdin); //fscanf(stdin, "%s", buffer); //unfortunatelly it doesn't work because it stops after reading a white space printf("\nhere is your string ::: %s\n", buffer); return 0; } thanks in advance for your help, nicolas PS:: also another weird behaviour is when instead of a number{in the menu response} i type a letter, then i get in an infinite loop. Why is this happening?
https://www.daniweb.com/programming/software-development/threads/97304/io-problem
CC-MAIN-2016-50
refinedweb
295
73.71
Coding Guidelines The document below outlines our code style guidelines. If you have suggestions for things that should be clarified better, etc. please let us know. Please don’t send us suggestions of the kind “I like this indenting style better, could we switch?”.)..) - Use tabs to indent blocks. - A line must not have more than 100 columns. - When wrapping a too long line, add at least one tab to the wrapped parts, and one extra tab per expression levels (for example in a complex condition with parentheses, each parenthese level is indented one tab further). - Functions/classes in namespaces are not indented. - Always separate operators with a space on both sides, and use a space after comma.(); // Redundant private: to separate the fields from the methods: private:) { ... } // Empty functions in class definitions can be written on a single line class Example { void FooFunction() {} }; // Empty functions defined outside a class must be formatted the same as other functions void Foo::FooFunction() { } static int32 my_static_function() { return 42; } if (someCondition) { DoSomething(); // Comments that only affect a single line are usually // written after it, and are indented by a single tab != 0) { // Indent one tab per expression level ... }) - Put exactly two blank lines between functions, as well as between includes and defines blocks, and between defines and variable or function definitions. -. -over abbreviations such as msg. Use rect, frame, boundsover r, menuItemover mi. Use Haiku-defined APIs, types, etc. - Prefer using an Haiku’s <<operators and SetToFormat()umeRoster; volumeRoster.Rewind(); BVolume volume; while (volumeRoster, Git will have your name and email in: /* * Copyright 2004-2007 Haiku, Inc. All rights reserved. *.” without listing any authors. 
In case you prefer to keep the copyright to yourself, the license header should look like the following; multiple authors can be listed like in this example: /* * Copyright 2007 Jane Doe, optional@email * Copyright 2003-2005 Some Developer, optional@email * the license text and the header guard. - After the copyright header (including the header guard in header files), there are exactly two blank lines before the rest contents. Dead code and Debugging code - Do not leave dead, commented, or #if 0‘ed. Style Checker Tools While the following tools still miss some issues and sometimes report false positives, they are most of the time correct and help to spot the most common issues. Those are still in development and improvements are of course welcomed. haiku-format is a clang-format based automatic code formatter..
https://www.haiku-os.org/development/coding-guidelines%20
CC-MAIN-2021-49
refinedweb
405
64.71
Posted 16 Aug 2015 Link to this post How can i generate a custom format of the data that is displayed in the gridview? for example, i want to generate a timesheet for employees in my organization, the first line of the sheet should be the name of organization, second line should be the name of the employee and then the gridview should be binded with the database. i also want to export data from this gridview to excel sheet. format image is attached bellow can someone help me? Posted 17 Aug 2015 Link to this post Posted 18 Aug 2015 in reply to Dimitar Link to this post thank you so much for your response. the grid view spread exporter is present in which library? its giving me error namespace not found. i have included the reference but still no luck.. any help?
http://www.telerik.com/forums/custom-format-for-data-in-gridview
CC-MAIN-2017-09
refinedweb
144
70.13
Learn the Correct Method to useCallback And useMemo Are you a React developer? Yes! Then, this blog is preferably for you. Today, we are going to share a complete guide on React-Hooks. In the latest version of , two built-in hooks or new features are introduced to optimize the performance, such as . React 16.8 useCallback and useMemo Many programmers may feel quite confused about the difference between useCallBack and useMemo. Why do useMemo and useCallback expect a function? Not to worry if you have the same query or concern about React-Hooks. Here we are trying to share the best and most accurate information. I hope you find your solutions here. Let’s get started. What are React-Hooks? Hooks basically are a new feature introduced in React 16.8; it allows the useState and other React features without writing a single class. Hooks are methods and functions that can "hook into" React's state and lifecycle features. What is useCallback? The useCallback hook is used when a child component is rerendering over and over again without any need. Purpose of useCallback By using useCallback, you can prevent unnecessarily re-rendering components by returning the same instance of the function that is passed instead of creating a new one each time. Using useCallback hooks is useful when a child component is rendering again and again without ever needing to. Passing an array of dependencies and a memorialized callback will result in a memoized version of the callback that is only modified when any dependence changes. Syntax of useCallback: const memoizedCallback = useCallback( () => { doSomething(a, b); }, [a, b], ); What is useMemo? The useMemo hook is used in the functional component of React to return a memoized value. Purpose of useMemo In computer science, memoization is a concept used in general when we can save re-compilation time by returning the cached result. In the , useMemo hooks provide memory-mapped values. 
functional component of React Using useMemo when very little processing is involved is not really wise. In cases with very little processing, using it could add extra overhead. However, useMemo works well to avoid extra rendering unnecessarily. Now, you must be wondering what memoization is in React-Hooks. Memoization Memoization refers to the concept of not recompiling a function with the same argument again for the next run because it returns the cached result the next time that it is called. At this point, you must have a better or clearer understanding of both hooks: useCallback and useMemo. Also, the purpose and usage of both. Now, let’s move ahead with the next topic of discussion. React-Hooks At first glance, it might look like useCallback and useMemo usage are quite similar, however, it’s not. Therefore, there are many chances to get confused about when to utilize useCallback and useMemo. In order to eliminate all the confusion about useCallback and useMemo, let’s have a look at the difference between both hooks. Syntax of useMemo: const memoizedValue = useMemo(() => computeExpensiveValue(a, b), [a, b]); Difference Between useMemo And useCallback In both useMemo and useCallback, the hook accepts a function and an array of dependencies. The major difference between useCallback and useMemo is that useCallback will memoize the function itself, whereas useMemo will memoize the value the function returns. Essentially, the only difference between these hooks is that one caches a value type, and the other caches a function. Let's take an example; if the computationally expensive code accepts arguments and returns a value, you would need to use useMemo so you could keep referencing that value between renders without re-running the computationally expensive code. On the other hand, in order to keep a function instance persistent for multiple renders, useCallback is needed. This is like placing a function outside the scope of your react component to keep it intact.
In What Way Does The Dependency Array Work With These Hooks? You might be familiar with the dependency array - useMemo and useCallback if you have ever used the React-Hook useEffect. However, if you’re not familiar with the term, then don't worry. We will explain what useEffect means. useEffect The useEffect hook is used to handle side-effects in functional components. The callback argument is the logic to be used. Dependencies is a list of dependencies of the side-effect: being props or state values. (callback, dependencies) Using this Hook, you tell React that your component needs to perform some action after rendering. When React updates the DOM, it stores the effect you pass (we'll call it "effect"). We can also perform data fetching or call another imperative API to achieve this result as well as setting the document title. Syntax of useEffect: useEffect(() => { const subscription = props.source.subscribe(); return () => { // Clean up the subscription subscription.unsubscribe(); }; }); Now, let’s move on to how the dependency array works with these hooks The useCallback, useMemo, and useEffect methods are ways to enhance performance between re-rendering of components in React-based applications. These functions control some of the features offered by class-based components, such as the persistence of dedicated states through render calls and the appearance of components at various stages of their lifecycle. Basically, you provide an array of values or variables inside the function you provide to the hook. Hope you find it informative.
https://hackernoon.com/react-hooks-what-is-the-difference-between-usecallback-and-usememo?source=rss
CC-MAIN-2021-43
refinedweb
884
56.05
0 Hi all, I am new to programming, and i'm working from a book which has set me a challenge with a random jumble word program to pair the jumble word with a hint. I have been going round in circles trying to work it out, please help point me in the right direction. Here is my code: import random name = ("stephen","bob", "robert","bill") word = random.choice(name) correct = word jumble = "" while word: position = random.randrange(len(word)) jumble += word[position] word = word[:position] + word[(position +1):] print("\nThe jumble word is: ",jumble) guess = input("\nTake a guess: ") while guess != correct and guess !="": print("\nYour guess is wrong: ") guess = input("\nTake another guess: ") if guess == correct: print("\nWell done you have guessed correctly.") print("\nGoodbye.") input("\nPress the enter key to exit.")
https://www.daniweb.com/programming/software-development/threads/474818/how-do-i-pair-these-up-with-a-hint
CC-MAIN-2017-39
refinedweb
134
65.22
I am trying to write and read data from I2C based EEPROM. When i try the same code from the serial terminal it runs fine and it dont give me error. But when i try to execute same code from main.py file i get the following OS Error "OSError: [Errno 5] EIO" . Below i have given my code. Have anyone faced the same issue, than please tell how to resolve it ? Code: Select all from pyb import I2C, Pin #I2C Initialisation i2c = I2C(1,I2C.MASTER, baudrate = 100000) #Write data "A" to slave address 80 i2c.mem_write("A",80,1,timeout = 5000, addr_size = 8) #Recieve data into Recieve Buffer i2c.mem_read(1, 80, 1, timeout = 5000, addr_size = 8)
https://forum.micropython.org/viewtopic.php?f=2&t=4746&p=27418
CC-MAIN-2019-30
refinedweb
119
77.43
Content-type: text/html pthread_delay_np - Delays a thread's execution. DECthreads POSIX 1003.1c Library (libpthread.so) #include <pthread.h> int pthread_delay_np( const struct timespec *interval); None Number of seconds and nanoseconds to delay execution. The value specified for each must be greater than or equal to zero. This routine causes a thread to delay execution for a specific interval of time. This interval ends at the current time plus the specified interval. The routine will not return before the end of the interval is reached, but may return an arbitrary amount of time after the end of the interval is reached. This can be due to system load, thread priorities, and system timer granularity. Specifying an interval of zero (0) seconds and zero (0) nanoseconds is allowed and can be used to force the thread to give up the processor or to deliver a pending cancelation request. The timespec structure contains the following two fields: tv_sec is an integral number of seconds. tv_nsec is an integral number of nanoseconds. If an error condition occurs, this routine returns an integer value indicating the type of error. Possible return values are as follows: Successful completion. The value specified by interval is invalid. None Manuals: Guide to DECthreads and Programmer's Guide delim off
http://backdrift.org/man/tru64/man3/pthread_delay_np.3.html
CC-MAIN-2016-50
refinedweb
212
59.19
The module optparse was a great addition to Python 2.3, since it is much more powerful and easier to use than getopt. Using optparse, writing command-line tools is a breeze. However, the power of optparse comes together with a certain verbosity. This recipe allows to use optparse with a minimum of boilerplate, trading flexibility for easy of use. Still, it covers 95% of my common needs, so I think it may be useful to others. Discussion The following script is an example of how to use the recipe. <pre> """An example script invoking optionparse, my wrapper around optparse. usage: %prog [options] args -p, --positional: print positional arguments -1, --option1=OPTION1: print option1 -2, --option2=OPTION2: print option2 """ import optionparse opt, args = optionparse.parse(__doc__) if not opt and not args: optionparse.exit() if opt.positional: print args if opt.option1: print opt.option1 if opt.option2: print opt.option2 </pre> The optionparse.parse() function parses the docstring and internally builds an option parser object using optparse; then it uses that parser to parse the command line arguments (please do not confuse parsing the docstring with parsing the command line!) It returns an object containing the given options and a list of positional arguments. If no options and no positional arguments are given, the script exits and returns an helpful message: <pre> $ python example.py An example script invoking optionparse. usage: example.py [options] args -p, --positional: print positional arguments -1, --option1=OPTION1: print option1 -2, --option2=OPTION2: print option2 </pre> A similar message is also obtained if the -h or --help option is passed. If the -p flag is passed, the list of positional arguments is displayed: <pre> $ python example.py -p *.txt [list-of-text-files-in-the-current-directory] </pre> If the option argument 1 or 2 are passed, they are displayed: <pre> $ python example.py -1hello -2world hello world </pre> I think you get the idea. 
Within the current implementation there are restrictions with the format of the usage block in the docstring: for instance it cannot contain blank lines and one must be careful with characters such as ":" "," "=". It is up to you to build up a more sophisticated parser, if you care enough. The purpose of this recipe is just to give the idea. Beautiful.. Similarly, this recipe treats the docstring's definition of the supported command line options as definitive, and doesn't require you to repeat yourself. See also. You could probably perform more complicated parsing, and introduce some additional syntax checking at the same time, by adapting parts of the CMDSyntax module's Syntax class: __doc__ disappears with -OO. Just a reminder that the __doc__ string will be empty when Python is invoked with -OO. Otherwise, an excellent recipe. Can easily be enhanced to take default values. With the code below, I'm very much enjoying this recipe. I've tweaked it slightly to allow default values, and thought that might be a useful change... (comment continued...) (...continued from previous comment)
http://code.activestate.com/recipes/278844/
crawl-002
refinedweb
501
57.87
I have been using C# to develop .aspx web application. As most know, the default location to build these application in MS Visual Studio is in the wwwroot directory. When using C#.NET, each program builds into it's own namespace/directory. There are also many files created by Visual Studio that are associated with these applications. Now, when it is time for me to send the files from my wwwroot directory to my webpages server, which files do I need to send over? I do not run the server on my machine. I send them to an outside server but build the application on my local machine. Do I need to send the entire namespace? Or are there particular files in the namespace that need to be sent to the server so that the .aspx page my run properly (ie. .aspx.cs and other related files) Any help with this topic would be greatly appreciated. Thanks.
http://forums.devshed.com/net-development/85493-net-files-server-last-post.html
CC-MAIN-2015-40
refinedweb
157
67.96
loxley commented on 2015-03-17 06:15 To get this to build I had to change _gitname to 'goobook' and source to '.../goobook.git'. firecat53 commented on 2015-03-12 17:40 Switched to https link on gitlab, as gitorious is going away soon. Scott nougad commented on 2015-03-07 20:02 What about switching sources to https:// protocol to avoid need of a ssh key for gitorious? firecat53 commented on 2013-08-26 01:18 Updated for python2-setuptools. Thanks! Scott the_isz commented on 2013-08-25 08:09 Until the change makes it upstream, I propose the following PKGBUILD which contains the changes suggested by petelewis: Kind regards petelewis commented on 2013-08-20 08:09 Arch has just replaced python2-distribute with python2-setuptools. To get this to build and work, I found that I had to: 1) Change the depepend and makedepend from python2-distribute to python2-setuptools. 2) Replace line 58 of setup.py: 'distribute', with 'setuptools', I guess this should/will probably make it upstream at some point, but then I have no knowledge of python packaging. HTH. firecat53 commented on 2013-08-14 05:04 Thanks! Guess I should test in a clean environment.... Fixed. Scott loxley commented on 2013-08-13 06:16 Missing a dependency for python2-six also. goobook Traceback (most recent call last): File "/usr/bin/goobook", line 5, in <module> from pkg_resources import load_entry_point File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 2850,: six Installing it will fix it. firecat53 commented on 2013-08-11 03:03 Updated goobook-git (1.5) and python2-hcs_utils (1.4rc5) to most recent releases. You shouldn't get any errors now. Thanks for the heads up. Scott geekinthesticks commented on 2013-08-07 16:43 Latest version seems to require hcs-utils>=1.3. However, when I update to hcs_utils 1.4rc5 I get the following error: pkg_resources.DistributionNotFound: argparse>=1.1 I have updated my Arch, so all the python packages should be the latest version. 
firecat53 commented on 2013-06-12 16:55 Fixed, thanks! Scott Stebalien commented on 2013-06-12 02:08 You should add the '--tags' flag to 'git describe'. As it is, it generates 'v1.2.85.gbf7eb0f' instead of '1.4.1.gbf7eb0f'. firecat53 commented on 2013-05-23 02:41 Fixed, thanks! Scott bidossessi commented on 2013-05-23 01:16 Package fails to install with yaourt. Starting pkgver()... ==> Updated version: goobook-git 1.4-1-gbf7eb0f.bf7eb0f-1 ==> ERROR: pkgver is not allowed to contain colons, hyphens or whitespace. My solution: append "| sed 's|-|.|g'" to last pkgver() line. firecat53 commented on 2013-04-16 04:22 PKGBUILD updated for pacman 4.1 standards. Scott firecat53 commented on 2013-04-07 23:23 Thanks! I'll have a look at this and update as soon as I can. Appreciate the heads up! Scott poxar commented on 2013-04-07 09:27 Hey, I've updated the PKGBUILD to use the new features of pacman 4.1 firecat53 commented on 2012-12-24 21:09 Fixed. Thanks :) Scott ISF commented on 2012-12-21 06:36 Hi, Please change the reference to ${startdir}/pkg in line 39 to "${pkgdir}" (with quotes), otherwise packaging fails in yaourt when exporting packages to non-default locations. firecat53 commented on 2012-11-12 19:59 Updated dependencies to python2-keyring and python2-hcs_utils. Note - AUR package python-hcs_utils renamed to python2-hcs_utils. I emailed a merge request to AUR-general. Scott ISF commented on 2012-11-12 12:40 Hi, please update the package to use python2-keyring instead of python-keyring (they will be merged soon). Thanks firecat53 commented on 2012-11-10 19:18 Fixed, thanks! Scott ervandew commented on 2012-11-10 02:11 Looks like python-simplejson is now python2-simplejson. firecat53 commented on 2012-10-10 04:22 Fixed, thanks. If you've got python-gdata installed, make sure you uninstall it before installing goobook, which will install python2-gdata. Scott The-Compiler commented on 2012-10-09 20:04 python-gdata was renamed to python2-gdata it seems. 
firecat53 commented on 2012-08-14 21:35 Well, I could probably figure it out, but as python-simplejson is available in community still, I'd rather not take the time right now to patch the original source...which still requires it. I'll keep an eye on it, though. It doesn't look like goobook has been updated in over a year now. Scott myle commented on 2012-08-14 11:02 Is python-simplejson required? I thought it was included in the main distribution of Python since Python 2.6. firecat53 commented on 2011-10-10 17:51 Updated dependencies - python2-distribute instead of setuptools firecat53 commented on 2010-11-03 09:22 python-hcs_utils is one of the dependencies in the PKGBUILD, but perhaps it needs reinstalling following the python2 change recently. Scott Anonymous comment on 2010-11-03 01:42 I also had the exact error message as ricklpt below, and I was able to correct it by installing python-hcs_utils firecat53 commented on 2010-11-01 17:14 Is python-keyring installed? If so, try reinstalling it. Scott Anonymous comment on 2010-11-01 09:49 When i install this package and run it i get the folloeing error message, i dont know if this is goobook-related but maybee its usefull to post here .-(~)------------------------------------------------------------------------------------- `--> goobook Traceback (most recent call last): File "/usr/bin/goobook", line 5, in <module> from pkg_resources import load_entry_point File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 2671, in <module> working_set.require(__requires__) File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 654, in require needed = self.resolve(parse_requirements(requirements)) File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 552, in resolve raise DistributionNotFound(req) pkg_resources.DistributionNotFound: keyring>=0.2 Rick firecat53 commented on 2010-10-25 16:08 Ah, updated to remove python-argparse dependency. 
Scott cmb commented on 2010-10-25 07:49 Just remove python-argparse from depends=, and all should be well. argparse is part of Python 2.7's standard library. firecat53 commented on 2010-10-19 17:58 Updated to reflect python2 dependency. Note -- until python-argparse is fixed, you have to change the depends line in that PKGBUILD to python2 (instead of python<2.7). firecat53 commented on 2010-08-16 16:25 Added python-keyring dependency. ejstacey commented on 2010-08-16 01:34 Just an FYI, this also requires python-keyring >= 0.2. anpieber commented on 2010-04-23 06:02 seams to work fine. Thank you very much! firecat53 commented on 2010-04-23 00:25 1. Added python firecat53 commented on 2010-04-23 00:14 1. Added anpieber commented on 2010-04-22 07:44 ah ok found the required version: anpieber commented on 2010-04-22 07:31 ok, i know that always the latest version is used; but this case is different. Some additional references are required: 1) python-argparse (); but the package is in a lower version (1.0.1) than required by goobook (1.1). Nevertheless goobook also works with 1.0.1; I've flagged the python-argparse package out of date. We can wait for the python-argparse maintainer to upgrade his package or talk to the goobook maintainer to use the >1.0 version in the requirements 2) hcs_utils (git://gitorious.org/hcs_utils/mainline.git) Currently there is no aur package available for this reference. Besides two problems again: 1) version numbers do not match (1.0 in hcs_utils; 1.0rc1 required by goobook). 2) I'm no python expert and have no idea why this happen (I find nothing within the setup.py which can cause this behavior), but if i use sudo python setup.py install it produces a hcs_utils of version x.x.dev. Any ideas how I can get rid of it? @1) I'm going to write to the goobook mailing list @2) no idea, sry; otherwise I'll provide the hcs_utils package on my own kind regards
https://aur.archlinux.org/packages/goobook-git/?ID=36035&comments=all
CC-MAIN-2016-44
refinedweb
1,356
67.96
There are many approaches for image enlargement. This program was written to test a newly developed image interpolation algorithm. To use this program, you first have to open a picture using the Open button. After you open the picture of your choice, the picture will appear in the left picture box. In this box, you would see a white rectangle. This shows the part of the original picture that appears in the right picture box. The picture in the right picture box can be resized using the zoom track bar. You can also change the sharpness using the sharpness track bar to make the picture clearer. After you have the desired zoom and proper sharpness, you save the picture using the Save button. The larger the zoom factor, the longer it will take. The newly saved enlarged picture will have much sharper results, but still maintains the original details. This program includes the Zoomable and Scrollable Picturebox, which is described on CodeProject. Some members suggested this user control should include crop and selection of images. I took the suggestions into consideration and came up with this program. 
In this program, I have added a property for the Zoomable and Scrollable Picturebox control: Selection selection; public Selection Selection { get { return selection; } set { selection = value; Invalidate(); } } and two public methods: public Point ConvertControlPointToCanvas(Point point) { Point pt = new Point(); if (viewRectWidth > canvasSize.Width * zoom) { pt.X = (int)((float)(point.X - viewRectWidth / 2 + canvasSize.Width * zoom / 2f) / zoom); pt.X = Math.Min(Math.Max(pt.X, 1), canvasSize.Width - 1); } else pt.X = (int)((float)(point.X + hScrollBar1.Value) / zoom); if (viewRectHeight > canvasSize.Height * zoom) { pt.Y = (int)((float)(point.Y - viewRectHeight / 2 + canvasSize.Height * zoom / 2f) / zoom); pt.Y = Math.Max(Math.Min(pt.Y, canvasSize.Height - 1), 1); } else pt.Y = (int)((float)(point.Y + vScrollBar1.Value) / zoom); return pt; } public Point ConvertCanvasPointToControl(Point point) { float xOffset = viewRectWidth > canvasSize.Width * zoom ? (viewRectWidth - canvasSize.Width * zoom) / 2f : -hScrollBar1.Value; float yOffset = viewRectHeight > canvasSize.Height * zoom ? 
(viewRectHeight - canvasSize.Height * zoom) / 2f : -vScrollBar1.Value; Matrix mxCanvastoContol = new Matrix(); mxCanvastoContol.Scale(zoom, zoom); mxCanvastoContol.Translate(xOffset, yOffset, MatrixOrder.Append); Point[] pts = new Point[] { point }; mxCanvastoContol.TransformPoints(pts); return pts[0]; } I have also written a class selection.cs for the Selection property: using System; using System.Collections.Generic; using System.Text; using System.Drawing; namespace YLScsZoom { public class Selection { Color lineColor = Color.Black; float lineWidth = 1.0f; Point location = new Point(0, 0); Size size = new Size(0, 0); public Color LineColor { get { return lineColor; } set { lineColor = value; } } public float LineWidth { get { return lineWidth; } set { lineWidth = value; } } public Size Size { get { return size; } set { size = value; } } public Point Location { get { return location; } set { location = value; } } public void Draw(Graphics g) { Pen p = new Pen(lineColor, lineWidth); g.DrawRectangle(p, new Rectangle(location, size)); p.Dispose(); } public virtual bool isHitting(Point pt) { Rectangle r = new Rectangle(location, size); if (r.Contains(pt)) return true; else return false; } } } You can select an image with selection through its properties Location and Size. In this program, I have used the mouse events of the control to get the point in the control, then converted it to an image, which is the selection Location, by using the method ConvertControlPointToCanvas. The selection Size is determined by the zoom factor and the size of the client window that shows the enlarged image. The image interpolation approach used in this program is totally different from the methods mentioned by Libor Tinka. Mine has two parameters: zoom factor and sharpness factor. 
using System; using System.Drawing; namespace YLScsLib.Imaging { public class YLScsZoom { public YLScsZoom(); public static Bitmap Apply(Bitmap srcBmp, float zoom); public static Bitmap Apply(Bitmap srcBmp, float zoom, float factor); public static byte[,] Apply(byte[,] srcBytes, int nwidth, int nheight, float factor); public static ChannelBytes Apply(ChannelBytes srcBytes, int nwidth, int nheight, float factor); } } Their values are provided by the two track bars in this program. Thanks a lot for trying this program and my image interpolation algorithm. General News Question Answer Joke Rant Admin
http://www.codeproject.com/KB/graphics/YLScsZoom.aspx
crawl-002
refinedweb
673
51.34
Languages? Jack G. Ganssle is a lecturer and consultant on embedded development issues. He conducts seminars on embedded systems and helps companies with their embedded challenges. Contact him at jack@ganssle.com. His website is. Using OOP for embedded system is becoming neccessary. Imagine a portable media player with 1M LOC, and 19 engineers and 2 sub-contracting firms working. Its becoming more difficult to design and develop in 'C. So the developer used OOC. The design to implementation was simple. In good old days, embedded systems only meant 8 bit memory starved processors. Now, 32 bit processor using few mega bytes of code, and data are not un-common. I remember Jack answering the wannabe embedded system programmers about the rules of embedded systems. I now think, the gap between desktop programming and embedded systems programming is fading with large embedded systems. - Britto Edward Victor OOP benifits have yet to reach my team here. In my first embedded project, we used completely OOP (C++) and put some limitations on features like virtual functions that could attribute to performance/code bloat. In my later projects, we used OO design methodology along with the team because the team did not have enough knowledge of C++ to code and debug (and no time and money to invest in training as usual). These projects made us define how we should code object oriented code using C. In the final few projects, I work with a much larger and a much "older" team - who have done Structured Programming for ages using C. We are using simple structured design techniques for low level programming and for higher level application and control software, partial OO design is used. But coding is completely 100% in C. My trend has been a delining use of C++ through the years as compared to your stats :( I find the only reason C++ is not getting used is due to inertia to even try it out for purposes of evaluation. I dont find any other reason to use C and C alone. 
A hybrid should always do better. - Saravanan T S Jack, I claim that we used polymorphism when I designed a bunch of user interface functions, in C, to respond to events. The events were: "up key pulsed", "down key pulsed", "enter key pulsed", "initialize", "paint (redraw)", and "tick update (250 ms.)". The simplest kind of function was a generic "menu task", kind of like a Windows "window manager" function. We also implemented a menuing system, with refined menu tasks to offer selectable menus, radio button menus, multi-check menus, and a family of dialog editors to edit gas concentrations, times, dates, and other values. The dialogs had another "polymorphic" interface, whereby the behaviors fit into a structure of functions for "convert binary to string", "convert string to binary", "edit char", "next char", "range check binary", and "display error", all in C, for each of the various editable data types. Are you claiming that none of it was polymorphic, just because it was written in C? - Keith R. Hill I think OOP makes sense in some applications, but most embedded systems still use small processors. If you work on 8 bit ones you are not a dinosaur no matter what the magazines imply. - Tom The bandwagon effect is the observation that people often do (or believe) things because many other people do (or believe) the same. The effect is often pejoratively referred to as herd instinct, particularly as applied to adolescents. Without examining the merits of the particular thing, people tend to "follow the crowd." The bandwagon effect is the reason for the bandwagon fallacy's success. I sincerely apologize, but if you are using C++ (OOP) in your embedded designs... it's because your manager has gotten on the Bandwagon!!! C++ (OOP) is the wrong tool for embedded designs. Everything you need to do, can be done more efficiently with C. You have more control over the entire system, than you do with C++. 
- Steve King we tend to "pick & choose" on a per-module basis as to OOP vs functional programming, where we decide for example that polymorphism is good ( accessing different types of memory using same prototype ) vs the pointer overhead is bad ( writing to a display ). I'd just like to point out to those die-hard OOP exponents that if (as claimed) it is as efficient an idiom, why it is unsuitable for slower processors? - Ed Straker ok... Let us bend some myths out there. --> First of all you can do OOP just fine with plain old 'C' --> C++ should not cause code bloat...unless you are using STL that is. --> You really ought not to use STL in an embedded project anyhow. --> You can get code bloat with plain old 'C'...hmmmm...anyone remember all that old MS Windows stuff?? --> Just as we have had die-hards that still profess assembler to be the one and only, 'C' will fall into this category also. --> You can obfuscate 'C' just as readily as 'C++' --> Believe it or not you can still mix assembler with C++ and get away with it! - Ken Wada These are just a languages after all. It is all about the way how we prefer to express our thoughts.Take a look to the natural languages over the world - different habits, lifestyles, alphabets... The use of C/C++ should be probably application driven - the more human related features/interfaces->the more resources->the more need to use higher level of abstraction and expression (C++). Can someone tell me the ratio of commercial C++ compilers for 8-bit toward higher-bit controllers/processors ??? This herd-effect survey might explain something. - kolio I agree with Steve when he talks about control over the entire system. I have been using OO approach for design and that makes implementation easier. Coding is strictly in C. - Monali Bhalerao One reason responses to these surveys are all over the place is that "embedded system" takes in too much territory. A lot of designs are single chip 8-pin micros with half a K of memory. 
Others have embedded PCs with Linux, etc. Yet others are an assembly line that fills a building with dozens of networked computers. You don't use the same tools and techniques to build a dollhouse for your daughter as you do to build a skyscraper. - Tim McDonough I tried once to use C++ in a design that could be classed "hybrid" embedded (PC based). I was fresh out of a course in C++ at a University. The prof had tried teaching the "current" view of "computer science" with C++. The language issues bogged down the course. After trying to use C++ in real life, I found it easier to revert to C constructs and structure, rather than OOP constructs on the original project. It moved faster. Since then, I have coded mostly 8 bit microcontrollers in C, mostly in vendor supported toolsets (data acq and networking). I have also coded some PC/Windows using C# . The smaller devices, to me, work best with code written as a set of functions called from a main loop or interrupt driven "structure". Visual Studio forces OOP by design of the tools, built in functions, etc, so is a natural OOP language. Maybe the future trend is OOP, but I see it happening slowly, and with emphasis on new code on newer, higher bit count processors. Code maintenance is probably best done in the original language concept, structured or OOP. - Douglas Datwyler As I read Jack's article I immediately thought about the three biggest embedded systems I maintain. One is in C on a proprietary multitasker, one is in C++ on embedded MontaVista Linux, and one is in C++ on WindowsXP Embedded. Where we use C++ it is first for encapsulation, and second for inheritance, just like Jack's numbers. On the project in C, the reasons we WISH it were in C++ are likewise encapsulation and then inheritance. Embedded design penalizes sloppy work more harshly than the desktop world. 
If you take "advantage" of the C++ willingness to create and destroy objects dynamically (my favorite: returning an object from a function, and assigning it to another variable of the same type) you are asking for trouble, especially in embedded. So, don't do that. I find that a basic understanding of what the compiler is really doing goes a long way to choosing what behaviors are acceptable in embedded and what are not. In general I prefer C++ to C because it offers more tools, but I have a healthy sense of what features of each language to avoid in production code. And like many readers noted, in those projects where we only have C, we do our best to apply the good lessons from C++ anyway. - Mark Bereit This survey would be more informative when co-related against programming languages used. C++ is the most popular choice in the embedded world, but not the only choice. Bu what percentage are using C/C++? How many people are using Java? It was originally designed to parallel the the C++ ideas of OOP, yet eliminate many of the platform/porting issues and programming pitfalls. Only recently is it being adopted by the real-time community. Is it up to 20%? Other projects favor strongly typed languages such as Ada. But outside the government and military, who is using these languages? Still others use a systems design environment like MATRIXx or Simulink as their development platform and rely on some code generation to get lower-level C code which can then be compiled. Likely, these are very large projects. How about developers of distributed or multi CPU systems. How much more, or less likely to lean towards OOP? - Joe Sallman I would choose tools I find best suited for the task. On 8-bit processor I prefere C with necessary minimum of assembly. It gives better control of resources and resulting code is often more portable. And usually there is not enough RAM to fully utilize OO features. 
However on 32-bit device with real operating system I would consider C++ or other OO language, depending on application. If you need multiple instances of same object, multithreading and inheritance, C++ (or Java, etc...) is way to go. It's particulary handy for graphics and communication. But such design is more custom-built computer then classic embedded device, isn't it? - Mladen Matosevic I have read about OOP over the years, but everything I have read seemed to indicate that it would add more complexity to the software I write now using plain old "C" (this is for 8-bit, single chip uC's), with no real advantage. I have been playing around with Microsofts C# Express package, and what I HAVE seen of OOP does not impress me, but on the other hand, I don't write 1 million line programs. In fact, I don't think I've written more than 100K LOC in my entire 25+ year career! (obviously, you can tell I'm NOT a programmer!). I think that small projects, such as the kind I do, don't really need the OOP features. However, I would be interested in seeing an article where someone who is familiar with OOP looks at a variety of small projects and comes up with a reasonable way of deciding if OOP would make a difference. - Dave Telling At my work we design mostly large embedded systems (100,000+ lines of code). When I started ten years ago we started using full object-oriented C++. I started with a book called "Thinking in C++" which was good in that it explained what the assembler code would look like when using various C++ features. I would still write interrupt handlers in assembly language and/or C but for large systems C++ is great for the remaining code. For instance using volatile and const to describe objects we can use C++ to read read-only registers and write to write-only registers and have the optimizer turned up to the highest level. In C I always put these in separate files which did not get optimized by the compiler. 
I agree that you can't use every feature of C++ in embedded systems. Lately we have been making networked embedded systems and these are mostly written in Java. However we are not doing any register level manipulation in these systems and they are mostly soft real-time. - Tom Dietrich If All You Have is a Hammer Everything Starts to Look Like a Nail - fortunately, many of us working with computers, have over 2,500 hammers to choose from this chart: here or there . And it all apparently got started like this, as Genesis 11:1-9 informs: "Come, let us descend and confuse their language, so that one will not understand the language of his companion". More background about the Tower of Babel story can be obtained from this intro here. However, Jack, I think it is a loaded question you ask. From your research, there is 71.4% of those who use any (some) form of OOP. On the other hand, there is 63.7% who use none of those forms. Marginal difference, considering the vast area of what is being defined as "embedded system", today. It boils down to a choice: Windows CE (or Linux, OS X, any RTOS) written in assembly -and- TV set "remote control gizmo" running Linux and "application is done in Java", -or- the other way around. Purists might argue, but I am not and I won't - the choice is clear. As kolio said above earlier: "[it is] all about the way how we prefer to express our thoughts". Take a crappy thought/idea and implement it in the latest CS concept, run it on the latest HW and it will be - a crap. The language did not morph the thought into something that could not ever be. - Roger Lynx An interesting article and comments. The language matters not, the success or failure relies on the underlying design methodology. Basically how disciplined is your development team (even if it's a team of one) at following the plan. I have written code for many embedded systems: Java, C, C++, eForth, assembly, ... The successful ones were the projects where we had a plan and stuck to it. 
The failures were the ad hoc design projects, you know the "I can whip that out in a week!" type (turning into two weeks or way more. As far as C++ OOD goes, if you restrict the use of certain aspects of the language that may kill an embedded system C++ has many benefits. OOP for OOPs sake is a waste. Use what you feel gives you the most return on you time and effort. It's a business decision after all. - Bill Bynum I find it hard to believe, although I know it's prevalent, that C is the preferred embedded programming language. I can see where using virtual functions, exceptions, etc. in an 8-bit project might exceed your memory/performance constraints, but just using the non-OOP feature like function name overloading and namespaces seems like it would be a big enough win that *all* projects would be written in "C++ as a better C". - Mike Cox
http://www.embedded.com/electronics-blogs/break-points/4025706/Languages
CC-MAIN-2013-48
refinedweb
2,563
63.29
How to extend and use members of a superclass in c++? I'm trying to code a class to handle joystick input (irrelevant), and being rusty on inheritance and new to c++ I'm having some confusion while trying to create a subclass of my joystick class. Here's my code //superclass's .h #ifndef JOYSTICKINPUT_H #define JOYSTICKINPUT_H #include "WPILib.h" class JoystickInput { public: JoystickInput(Joystick*); Joystick * joystick; Victor * myVictor [3]; bool buttons [10]; bool buttonClicked(int id); void testForActions(); }; #endif And here's its definition //superclass's .cpp #include "JoystickInput.h" JoystickInput::JoystickInput(Joystick * joy) { joystick = joy; for (int x = 0; x < 10; x++) { buttons[x] = false; } } bool JoystickInput::buttonClicked(int id) { if (buttons[id] == false and joystick->GetRawButton(id) == true) { buttons[id] = true; return true; } else if (buttons[id] == true and joystick->GetRawButton(id) == false) { buttons[id] = false; return false; } else { return false; } } void JoystickInput::testForActions() { } Now I'm trying to extend this with a JoystickOne class, as its behavior is slightly different. 
To this end I created a JoystickOne.h and a JoystickOne.cpp //Joystickone.h #ifndef JOYSTICKONE_H #define JOYSTICKONE_H #include "WPILib.h" #include "JoystickInput.h" class JoystickOne : public JoystickInput { public: JoystickOne(Joystick*); Joystick * joystick; Victor * myVictor; bool buttons [10]; bool buttonClicked(int id); void testForActions(); }; #endif And the .cpp #include "JoystickOne.h" #include "WPILib.h" JoystickOne::JoystickOne(Joystick * joy) : JoystickInput(joy) { //joystick = joy; //myVictor = new Victor(1); /*for (int x = 0; x < 10; x++) { buttons[x] = false; }*/ } bool JoystickOne::buttonClicked(int id) { if (buttons[id] == false and joystick->GetRawButton(id) == true) { buttons[id] = true; return true; } else if (buttons[id] == true and joystick->GetRawButton(id) == false) { buttons[id] = false; return false; } else { return false; } } void JoystickOne::testForActions() { if (buttonClicked(1)) { } if (buttonClicked(2)) { } if (buttonClicked(3)) { //myVictor->Set(.3); } if (buttonClicked(4)) { } } My problem is that I'm not quite sure what's extraneous in the JoystickOne class. I come from Java, so I'm used to being able to just extend a superclass and automatically use all of its methods and members. I'm confused because of C++'s seperation into .h and .cpp files; from what I've learned by messing around I have to declare all variables and methods I wish to use, even if they're members of the superclass. I don't think I have to define method buttonClicked() twice, although I don't have a robot so I can't actually test that now. Basically, I'm asking what I can cut out from the definition of the JoystickOne class, and how to do it. If any of you have advice on some good OOP practices in C++ feel free to share, or maybe even clear up some java-isms that I have. Thanks! Answers You should mark methods that can be overridden as virtual in your base class. To override it in a derived class, simply re-define it in your derived class. 
Example: class Base{ public: virtual void overrideThis() { std::cout << "Base" << std::endl; } void test() { std::cout << "Base::test()" << std::endl; } }; class Derived : public Base{ public: void overrideThis() { std::cout << "Derived" << std::endl; } }; Now if you instantiate: Derived d; d.overrideThis(); // Will print 'Derived' d.test(); // Will print 'Base::test()' As for member variables: Private members defined in your base class will NOT be available in your derived class. On the other hand, protected and public member variables will be accessible. Need Your Help Animating text-shadow to fade in and out javascript jquery animation jquery-animateI'm trying to animate text-shadow to fade in and out using the snippet shared here: Instantiate a class inside a class NullPointerException java class methods nullpointerexceptionI'm getting a nullPointerException error in the line playlist1.firstSong = song; below (9th line). Any ideas?
http://unixresources.net/faq/16326144.shtml
CC-MAIN-2019-04
refinedweb
624
53.81
sendmsg - send a message on a socket using a message structure #include <sys/socket.h> ssize_t sendmsg (int socket, const struct msghdr *message, int flags); The msg_iov and msg_iovlen fields of message specify zero or more buffers containing the data to be sent. msg_iov points to an array of iovec structures; msg_iovlen must be set to the dimension of this array. Upon successful completion, the sendmsg() function returns the number of bytes sent. Otherwise, -1 is returned and errno is set to indicate the error. The sendmsg() function will fail if: - [EFAULT] - The message parameter, or storage pointed to by the msg_name, msg_control or msg_iov fields of the message parameter, or storage pointed to by the iovec structures pointed to by the msg_iov field can not be accessed. - [EINTR] - A signal interrupted sendmsg() before any data was transmitted. - [EINVAL] - The sum of the iov_len values overflows an ssize_t. - [EMSGSIZE] - The message is too large to be sent all at once, as the socket requires. See also: recvmsg(), send(), sendto(), setsockopt(), shutdown(), socket(), <sys/socket.h>.
http://pubs.opengroup.org/onlinepubs/7990989775/xns/sendmsg.html
CC-MAIN-2015-18
refinedweb
149
56.96
This is really two changes, one which fixes the just-plain-wrong behavior of the return-value of file:close() when called on a pipe on Linux (and presumably also other posix platforms) [this is the more important of the two], and the addition of a second return-value to the API, indicating the type of exit of the child process. I wrapped the two into the same patch as I consider them to be tightly related. Test case to follow. >From 661a0822cf5dead4f35779f5d288df9e48e4834f Mon Sep 17 00:00:00 2001 From: David Favro <lua@meta-dynamic.com> Date: Tue, 18 Jan 2011 13:00:09 -0500 Subject: [PATCH] file:close() return-value is consistent with the manual. The 5.2-alpha manual says that file:close() [i.e. io.close()], when called on a file-descriptor that had been opened by io.popen(), returns the process's exit-status; this was not the behavior under Linux and perhaps other posix platforms as well: it was returning the return-value of pclose(3), which is documented as being the same as that returned by wait4(2), which is documented as being the same as that returned by waitpid(2), which must be interpreted by the macros in <sys/wait.h>. This is corrected. Furthermore, the single return-value doesn't adequately reflect the possible exit statuses of a child process, which can also exit with a signal: so I have added an additional return-value indicates if the child process exited via exit() (in which case it is "e") or signal (in which case it is "s"). This additional return-value will of course be nil on platforms which don't support it. In the case of a signal, the first return-value is the signal number. Signed-off-by: David Favro <lua@meta-dynamic.com> --- doc/manual.html | 9 +++++++-- src/liolib.c | 20 ++++++++++++++++++-- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/doc/manual.html b/doc/manual.html index c154821..1b608a3 100644 --- a/doc/manual.html +++ b/doc/manual.html @@ -8795,8 +8795,13 @@ but that takes an unpredictable amount of time to happen. 
<p> If <code>file</code> was created with <a href="#pdf-io.popen"><code>io.popen</code></a>, -a successful close returns the exit status of the process. - +a successful close returns the exit status of the process. On platforms that +support it, there may be a second return value which is either "e" to indicate +that the child process exited due to an exit() call, or "s" to indicate that the +child process exited due to a signal, in which case the first return-value was +the number of the signal which caused the child to terminate, rather than its +exit status. In other cases, and on platforms which do not support it, the +second return-value is nil. diff --git a/src/liolib.c b/src/liolib.c index 658601a..1c5bf00 100644 --- a/src/liolib.c +++ b/src/liolib.c @@ -10,6 +10,10 @@ #include <stdlib.h> #include <string.h> +#if defined(LUA_USE_LINUX) || defined(__linux__) + #include <sys/wait.h> +#endif + #define liolib_c #define LUA_LIB @@ -147,8 +151,20 @@ static int io_pclose (lua_State *L) { if (stat == -1) /* error? */ return pushresult(L, 0, NULL); else { - lua_pushinteger(L, stat); - return 1; /* return status */ + #if defined(LUA_USE_LINUX) || defined(__linux__) + /* return: + * process-exit-status or signal-number, + * exit-type ("s" for signal or "e" for exit) + */ + const int exited = WIFEXITED( stat ); + const int signaled = WIFSIGNALED( stat ); + lua_pushinteger( L, exited ? WEXITSTATUS(stat) : (signaled ? WTERMSIG(stat) : 0) ); + lua_pushstring( L, exited ? "e" : (signaled ? "s" : "") ); + return 2; + #else + lua_pushinteger(L, stat); + return 1; /* return status */ + #endif } } -- 1.7.0.4
https://lua-users.org/lists/lua-l/2011-01/msg01089.html
CC-MAIN-2022-05
refinedweb
609
56.05
1.1 What is Python?. To find out more, start with the Beginner's Guide to Python. 1.2 Why was Python created in the first. 1.3 What is Python good for? Library Reference to get an idea of what's available. A wide variety of third-party extensions are also available. Consult the Python Package Index to find packages of interest to you. 1.4. 1.17 Do I have to like "Monty Python's Flying Circus"? No, but it helps. :) 2 Python in the real world 2.1 How is the Python Software Foundation?. for a full list of Python editing environments. 3 Upgrading Python 3.1 What is this bsddb185 module my application keeps complaining about? Starting with Python2.3, the distribution includes the PyBSDDB package <> as a replacement for the old bsddb module. It includes functions which provide backward compatibility at the API level, but requires a newer version of the underlying Berkeley DB library. Files created with the older bsddb module can't be opened directly using the new module. Using your old version of Python and a pair of scripts which are part of Python 2.3 (db2pickle.py and pickle2db.py, in the Tools/scripts directory) you can convert your old database files to the new format. Using your old Python version, run the db2pickle.py script to convert it to a pickle, e.g.: python2.2 <pathto>/db2pickley.py database.db database.pck Rename your database file: mv database.db olddatabase.db Now convert the pickle file to a new format database: python2 does Python use indentation for grouping of statements? Guido van Rossum believes that using indentation for grouping is extremely elegant and contributes a lot to the clarity of the average Python program. Most people learn to love this feature after awhile. a long time at it themself. This makes programs considerably longer and wastes valuable screen space, making it harder to get a good overview of a program. Ideally, a function should fit on. 4.4 Why are Python strings immutable? There are several advantages. 
One is performance: knowing that a string is immutable makes it easy to lay it out at construction time -- fixed and unchanging storage requirements. This is also one of the reasons for the distinction between tuples and lists. The other is that strings in Python are considered as "elemental" as numbers. No amount of activity will change the value 8 to anything else, and in Python, no amount of activity will change the string "eight" to anything else. 4.5 Why must 'self'. 4.6... 4.7(), apply() that for string operations Python has moved from external functions (the string module) to methods. However, len() is still a function. 4.8'], ", "). 4.11. 4.12 Why can't lambda forms contain statements? Python lambda forms form instead of a locally-defined function is that you don't need to invent a name for the function -- but that's just a local variable to which the function object (which is exactly the same type of object that a lambda form yields) is assigned! 4.13. 4.14 need not explicitly manage memory.; this will work regardless of GC: for file in <very long list of files>: f = open(file) c = f.read(1) f.close() 4.15 sys.exitfunc() hook to run a function that will force those deletions. 4.16. 4.17. 4.18. 4.19.__cmp__(o2)==0). 4.20in - sorted() - has been added. This function creates a new list from a passed iterable, sorts it and returns it. As a result, here's the idiom to iterate over the keys of a dictionary in sorted order: for key in sorted(dict.iterkeys()): ...do whatever with dict[key]... Versions of Python prior to 2.4 need to use the following idiom: keys = dict.keys() keys.sort() for key in keys: ...do whatever with dict[key]... 4.21 How. 4.22 Why are default values shared between objects? This type of bug commonly bites neophyte programmers. Consider this function: def foo(D={}): # Danger: shared reference to one dict for all calls ... compute something ... D[key] = value return D The first time you call this function, D contains a single item. 
The second time, D contains two items because when foo() begins executing, D starts out with an item already in it. It is often expected that a function call creates new objects for default values. This is not what happens. Default values are created exactly once, when the function is defined. If that object is changed, like the dictionary in this example, subsequent calls to the function will refer to this changed object. By definition, immutable objects such as numbers, strings, tuples, and None, are safe from change. Changes to mutable objects such as dictionaries, lists, and class instances can lead to confusion. Because of this feature, it is good programming practice to not use mutable objects as default values. Instead, use None as the default value and inside the function, check if the parameter is None and create a new list/dictionary/whatever if it is. For example, don't write: def foo(dict={}): ... but: def foo(dict=None): if dict is None: dict = {} # create a new dict for local namespace This feature can be useful. When you have a function that's time-consuming to compute, a common technique is to cache the parameters and the resulting value of each call to the function, and return the cached value if the same value is requested again. This is called "memoizing", and can be implemented like this: # Callers will never provide a third parameter for this function. def expensive (arg1, arg2, _cache={}): if _cache.has_key((arg1, arg2)): return _cache[(arg1, arg2)] # Calculate the value result = ... expensive computation ... _cache[(arg1, arg2)] = result # Store result in the cache return result You could use a global variable containing a dictionary instead of the default value; it's a matter of taste. 4.23.\\" 4.26 Why doesn't Python have a "with" statement like some other languages? Because such a construct" and similar language features (reduction of code volume) can, however, easily be achieved in Python by assignment. 
Instead of: function(args).dict[index][index].a = 21 function(args).dict[index][index].b = 42 function(args).dict[index][index].c = 63 write this: ref = function(args).dict[index] exception. 4.27.
http://python.org/doc/faq/general
crawl-001
refinedweb
1,066
67.35
In the previous article, we introduced the concept of object-oriented inheritance, attempted to place it laymen's terms, and then also took a high-level look at the conceptual model of how it works within the context of programming. But before we go any further and/or if you're just joining in the series, please review everything we've covered thus far by reading the previous articles: - An Introduction - Classes - Types - Control Structures: Conditional Statements - Control Structures: Loops - Functions and Attributes - Scope - Building the Plugin I - Building the Plugin II - Document the Plugin I - Document the Plugin II - Inheritance I Yes - we've covered a lot, but in order to lay the foundation for a beginner to have a strong place from which to start writing object-oriented PHP, there's a lot to examine. With that said, inheritance is where we begin getting into the intermediate topics of the paradigm, so this will be the final article that provides a look at the beginner concepts after which we'll end the series with a summary post. Inheritance Reviewed Recall that we defined inheritance as the following: Inheritance is when one class serves as the parent class for a child class that provides a number of attributes and methods common to both the parent and child; however, the child as the ability to introduce it's own attributes. It's a bit less formal than what you may find in an academic book, or even on Wikipedia, but it still explains the idea in terms that illustrate the point. In this article, we'll review all of the necessary code, functions, and reserved words related to the topic, we're going to take a look at how we can implement it within PHP in a very, very simple platform-agnostic way, and then we'll review an actual implementation of where inheritance is at play within WordPress. So with that set as our roadmap for the article, let's go ahead and get started. 
PHP Facilities In order to implement inheritance in object-oriented PHP, there are a number of reserved words with which we need to familiarize ourselves. Luckily, most of the words we've already covered, and those that we haven't are clear enough such that it's easy to remember them. So before we dive into looking at code, let's take a look at all of the reserved words in the language that we need to know so that we can begin to actually get into creating something. extendsis reserved word that indicates that one class is the child of another class. For example, in our previous article, a Postextends Content. We'll see this in play soon. privateis an attribute that is applied to properties and functions that mean they are accessible only within the context of the class in which they are defined. protectedis similar to privatewith the exception that the properties and methods that are marked as such can be accessed by the given class and any child classes. publicis the opposite of privatein that it means any class - the given class, a subclass, or a third-party class - can access the property or method in order to change it's information or call the function. You also need to be familiar with the :: operator, but we'll cover that a little bit later in the article when we begin looking at code. And that's it - nothing terribly daunting, is it? And what's even better is that if you've been following along with us throughout this series, then you're likely familiar with every word save for extends. Anyway, with that said, let's start working on an example. Some Example Code In order to get started writing some example code, we need to lay out exactly what it is that we're going to be trying to model (after all, that's what code does, isn't it?). In keeping consistent with the theme used throughout this series - especially in the last article - we'll have a parent class called Content, and two child classes each of which will be named Comment and Post, respectively. 
This will allow us to see how properties and methods exist within a single class, and how children can access attributes of their parents, as well as how parents can protect their properties and functions from their children, as well. But implementation will demonstrate far more than talking about it, so let's started writing some code. The Parent Class In our example, the parent class is going to be the Content because both of the subclasses - that is, the Comment - are types of content that have unique information associated with them that is not specific to the Content class. The key to inheritance is to identify all of the properties and methods that are common across all of the classes and keep them defined in the parent class or, in our class, in Content. Though this can vary based on how you view this, we'll setup Content such that it includes: - a date on which the content was published - an author - a method for saving the content - a method for formatting the content - a method for retrieving the content - and a method for retrieving the content author First, we'll look at the code, then we'll explain everything that's going on with it. <?php class Content { protected $publish_date; protected $author; private $content; public function __construct() { date_default_timezone_set( 'GMT' ); $date = new DateTime(); $this->publish_date = $date->format( 'Y-m-d H:i:s' ); $this->author = ''; } public function save( $author, $content ) { $this->author = $author; $this->content = $this->format_content( $content ); $this->content; } public function read() { return $this->content; } private function format_content( $content ) { return strip_tags( stripslashes( $content ) ); } public function get_author() { return $this->author; } } As previously mentioned, we have two protected attributes and a private attribute. Recall that this means that all of the subclasses can access the $publish_date and the $author, but only the Content can access the $content attribute. 
Also note that much of the code that you see in the above class is basic object-oriented PHP. There's nothing that stands out that deals directly with inheritance other than some of the access modifiers that we've declared. That is to say that it's relatively common to code we've seen thus far in this tutorial. One of the things that's worth noting is that the private function is in place to demonstrate two things: - How privatefunctions are only accessible within the context of the class in which its defined. - Strips any tags and slashes from the content so that markup cannot be saved with the content. Of course, this code isn't connected a database or a file system or anything, but the point still remains. Note that, in the code above there are a couple of things that we've needed to add in order to satisfy PHP's requirements. They are beyond the scope of this article, but it's worth pointing out here: - The code to date_default_time_setis required to set the timezone off of which the time can be retrieved. - The constructor is responsible for initially setting the publish date of the content, and it initializes the author property to an empty string. This is so that a Postcan have its own author and the Commentcan have its own author, as well. As we'll see later, a Commentcan even override the default publish date. Note also that we're able to retrieve the content from the read method and we're able to get the author from the get_author function. The First Child Next, let's go ahead and create the Post subclass. First, we'll take a look at the code and then we'll see how it interacts with the Content class we just created. <?php class Post extends Content { public function __construct() { parent::__construct(); $this->author = 'Tom McFarlin'; } public function post( $content ) { $this->format_content( $content ); } } The class appears small, right? 
There are no properties - because it inherits them from the Content class - and there are only two functions, one of which is unique to the class - Notice that in the constructor, we first make a call to the parent constructor using the :: operator. You can read much more about this in the manual, but suffice it to say that the operator is reserved to reference a number of difference things outside of the class in which it is defined. In our case, that's the call to the parent's constructor. Next, I've opted to set my name as the author of the post. Notice that I'm using the $this keyword. Because the subclass inherits properties from its parent, it can refer to those properties and if they were defined within itself. Note that this is possible not just because Post extends Content but because the property is marked as protected in Content, as well. If it were marked as private, this would not be possible. The Second Child Now that we've created the Post class, we also have the Comment class which, recall, represents someone leaving a a comment on a post. Were this production-level code, there would be far more code: We would need to relate a comment to a post, determine if a comment is a reply to an existing comment, mark a status of a comment, and so on. But for the purposes of demonstrating inheritance, we're leaving all of that out and focusing only on the things that can drive the concepts. <?php class Comment extends Content { public function __construct() { parent::__construct(); } public function add( $comment ) { $this->save( 'John Doe', $comment ); } } As you can see, the Comment code isn't much different from the Post code. To a degree - this is good because it shows that we've abstracted the proper parts into our base class. Anyway, notice that after we construct the Comment, we make our call to the parent constructor. 
Next, we define the add method which is responsible for taking the incoming comment and then saving it by passing the comment author and it's content to the save method. The nice thing is that the save method is already defined within the base class which also handles all of the formatting through the use of a private function, so we gain that functionality as we create our child class. Working with The Example With that done, let's run a couple of examples to show how the pieces fit together. To make sure this code executes, all you need is a web server, a directory out of which to run PHP scripts, and a text editor. First, we'll create an instance of Content and then we'll call a debug statement so that we can see what constitutes an instance of the class. $content = new Content(); var_dump( $content ); Permitting all works correctly, you should see everything that's above. Next up, let's go ahead and create a post. Since we're setting all of the information in the context of the class, all we really need to do is call a function on the class in order to display the information. For example: $post = new Post(); echo 'The post author is: ' . $post->get_author(); Again, since we've set everything up in the code itself, simply calling the method demonstrates the concept. Finally, we can create a Comment, call the add method on an instance of the class, attempt to pass in malicious code (only to see it stripped out by our code). If everything goes well, you should see the following: $comment = new Comment(); $comment->add( '<script type="text/javascript">alert("This is my comment.");</script>' ); echo 'The comment reads: ' . $comment->read(); And that's it: Our simple demonstration of inheritance. Inheritance in WordPress When it comes to looking at inheritance in WordPress, the very first thing that comes to mind for myself - and likely other developers - is the Widgets API. The reason I say this is because the API is powered by inheritance. 
Sure, widgets can be created without using the API, but I'd argue that's a misstep in development. Why make things more complicated for yourself when there's already a foundation in place to do it? But I digress. The nice thing about this particular API is that it showcases all of the high-points of object-oriented programming and inheritance at work. For example, here's a piece of sample code taken directly from the Codex: <?php class My_Widget extends WP_Widget { /** * Sets up the widgets name etc */ public function __construct() { // widget actual processes } /** * Outputs the content of the widget * * @param array $args * @param array $instance */ public function widget( $args, $instance ) { // outputs the content of the widget } /** * Outputs the options form on admin * * @param array $instance The widget options */ public function form( $instance ) { // outputs the options form on admin } /** * Processing widget options on save * * @param array $new_instance The new options * @param array $old_instance The previous options */ public function update( $new_instance, $old_instance ) { // processes widget options to be saved } } Now that we've covered the conceptual model, looked at the keywords and methodology, written our own code, and created our own example, this should be relatively easy to follow. But here's the thing: One of the best ways to get better at writing any type of code is to continually practice the concepts. That is, to explore the ideas written by other people who have done more advanced things that you in prior work. Case in point, take a look at the first example provided in the WordPress Codex. And if you're working with a later version of PHP that supports features such as namespaces (a slightly more advanced topic), then check out the second example, as well. The more you review the code and tease it apart, the more you are to learn about it. But going any further than that in this article will take us out of scope of the entire series. 
To The End At this point, we've covered all of the beginner material necessary to lay the foundation for a beginner's guide to writing object-oriented PHP. In the final article, we'll provide a summary of everything we've covered so that we have a single reference for the big ideas that can be bookmarked, saved, or referred to later. Additionally, we'll have a short period of discussion on a follow-up series, but we'll save that until then. For now, if you have any questions, comments, and/or feedback on the content, code, or examples above, feel free to do so in the comment section below. Tuts+ tutorials are translated into other languages by our community members—you can be involved too!Translate this post
http://code.tutsplus.com/articles/object-oriented-programming-in-wordpress-inheritance-ii--cms-21457
CC-MAIN-2015-32
refinedweb
2,477
53.95
![if !(IE 9)]> <![endif]>. Note. This article was originally published on stefansf.de. The original and translated versions are posted on our website with the permission of the author. The representation of a pointer is not defined by the C standard. However, operations involving pointers are defined—at least more or less. In the following we will have a look at these operations and how they are defined. Lets start with an introductory example: #include <stdio.h> int main(void) { int a, b; int *p = &a; int *q = &b + 1; printf("%p %p %d\n", (void *)p, (void *)q, p == q); return 0; } If compiled with GCC and optimization level 1, then a run of the program on a x86-64 Linux system prints: 0x7fff4a35b19c 0x7fff4a35b19c 0 Note that the pointers p and q point to the same memory address. Still the expression p == q evaluates to false which is very surprising at first. Wouldn't one expect that if two pointers point to the same memory address, then they should compare equal? The C standard defines the behavior for comparing two pointers for equality as follows: The first question which probably comes up is: What is an "object"? Since we consider the language C it has certainly nothing to do with objects as known from object oriented programming languages like C++. The C standard defines an object rather informally as: Lets be nit-picky. A 16 bit integer variable in memory is a data storage and can represent 16 bit integer values. Therefore it is an object. Should two pointers compare equal if the first pointer points to the first byte of the integer and the second pointer to the second byte of the integer? Of course this is not what the language committee intended. But at that point we should note that the language is not formally defined and we have to start guessing what the intention of the language committee was. Lets get back to our introductory example. Pointer p is derived from object a and pointer q is derived from object b. 
The latter involves pointer arithmetics and this is defined for the operators plus and minus as follows: Since every pointer which points to a non-array object is virtually lifted to a pointer of type array of length one, the C standard only defines pointer arithmetics for pointers of array types which is finally given in paragraph 8. The interesting part for our case is: That means, the expression &b + 1 should evaluate to an address without any problem. Hence p and q should be valid pointers. Recap what the C standard defines for comparing two pointers: "Two pointers compare equal if and only if [...] one is a pointer to one past the end of one array object and the other is a pointer to the start of a different array object that happens to immediately follow the first array object in the address space" (C11 § 6.5.9 paragraph 6). This is exactly the case in our example. Pointer q points one past the end of object b which is immediately followed by object a to which p points. Is this a bug in GCC? The finding has been reported in 2014 as bug #61502 and so far the GCC people argue that this is not a bug and therefore won't fix it. The Linux people ran into a similar problem in 2016. Consider the following code: extern int _start[]; extern int _end[]; void foo(void) { for (int *i = _start; i != _end; ++i) { /* ... */ } } The symbols _start and _end are used to span a memory region. Since the symbols are externalized, the compiler does not know where the arrays are actually allocated in memory. Therefore, the compiler must be conservative at this point and assume that they may be allocated next to each other in the address space. Unfortunately GCC compiled the loop condition into the constant true rendering the loop into an endless loop as described in this LKML post where they make use of a similar code snippet. It looks like that GCC changed its behavior according to this problem. At least I couldn't reconstruct the behavior with GCC version 7.3.1 on x86_64 Linux. 
Defect report #260 may apply in our case. The topic of the report is more about indeterminate values, however, there is one interesting response from the committee: Implementations [...] may also treat pointers based on different origins as distinct even though they are bitwise identical. If we take this literally, then it is sound that p == q evaluates to false, since p and q are derived from distinct objects that are in no relation to each other. It looks like we are getting closer and closer to the truth, or do we? So far we only considered operators for equality but what about relational operators? An interesting point is made while defining the semantics of the relational operators <, <=, >, and >=, in order to compare pointers: According to this definition comparing pointers is only defined behavior if the pointers are derived from the same object. Lets demonstrate the idea of this by two examples. int *p = malloc(64 * sizeof(int)); int *q = malloc(64 * sizeof(int)); if (p < q) // undefined behavior foo(); In this example the pointers p and q point into two different objects which are not related to each other. Hence comparing them is undefined behavior. Whereas in the following example int *p = malloc(64 * sizeof(int)); int *q = p + 42; if (p < q) foo(); the pointer p and q point into the same object and are therefore related. Hence it is sound to compare them—assuming that malloc does not return the null pointer. Storage Layout So far we didn't examine the standard w. r. t. the storage layout of objects. Let's consider objects of aggregate types first. An aggregate type is either a structure or an array type. The former is a sequentially allocated nonempty set of member objects. The only guarantee we get for members of a structure is that they are sequentially allocated in the given order. Thus, a compiler is not allowed to reorder members. However, nothing is said about the space between adjacent members. 
There we have that arbitrarily many padding bits may be added. For example, consider the following structure: struct { char a; int b; } x;. On most modern architectures between members a and b several padding bits are introduced—leaving it open how many these are since this depends on the alignment requirements of the type int. Therefore, deriving pointers from x.a and x.b and comparing them for equality results in undefined behavior whereas comparing them for relation as e.g. &x.a < &x.b results in defined behavior. For array types, we have that these describe a contiguously allocated nonempty set of objects. The crucial point is that in contrast to structure members array members are contiguously allocated. Thus not only the ordering of array members is defined but also that adjacent members are allocated without space in-between. This enables us to finally perform pointer arithmetics in a well defined manner on array members. For all other types, i.e., non-aggregate types, we have that the standard does not define the corresponding storage layout. Hence for our introductory example the storage layout of the variables a and b is not defined. Therefore, deriving pointers from the variables and comparing them results in undefined behavior. GCC exploits this fact and evaluates the expression p == q statically to false. The assembler output for the introductory example is given as follows if compiled with optimization level 1: .LC0: .string "%p %p %d\n" main: sub rsp, 24 mov ecx, 0 lea rdx, [rsp+12] mov rsi, rdx mov edi, OFFSET FLAT:.LC0 mov eax, 0 call printf mov eax, 0 add rsp, 24 ret The expression p == q is compiled into the assembler instruction mov ecx, 0. Different Array Objects It looks like we are getting closer and closer to the truth ;-) The most problematic part we stumbled across so far was in § 6.5.9 paragraph 6 where it is explicitly allowed to compare two pointers from two different array objects. Let's be philosophical. 
What are different array objects? According to the wording used in the standard each dimension of a multidimensional array is an array on itself. A modified version of our introductory example containing a multidimensional array is given as follows: #include <stdio.h> int main(void) { int x[2][1]; int *p = &x[0][1]; int *q = &x[1][0]; printf("%p %p %d\n", (void *)p, (void *)q, p == q); return 0; } Pointer p points one past the last element of an array object which is part of a multidimensional array object. Pointer q points to the first element of an array object which is adjacent to the array object from which p is derived from. Since both arrays are part of a multidimensional array it is defined behavior to compare p and q for equality. Thus p == q always evaluates to true. GCC and Clang evaluate the expression at compile time to true, i.e., emit the assembler instruction mov ecx, 1 for all optimization levels but 0. The important part in the example is that &x[0] points to a different array object than &x[1]. However, this is not explicitly stated in the C11 standard but is written between the lines. We started with an innocent looking example and stumbled across several pitfalls which led to undefined behavior. Our introductory example has the same problem as the example from the Linux people: Comparing two pointers which are derived from two completely unrelated objects invokes undefined behavior. It does not matter if the objects have external or internal linkage, or if they have automatic storage duration or not. The most problematic part was in § 6.5.9 paragraph 6 where it is explicitly allowed to compare two pointers from two different array objects. At this point in time I would have expected at least a single sentence stating that both pointers must be derived from two arrays which are subaggregates of the same multidimensional array. The wording became even more confusing in § 6.5.8 paragraph 5 where the relational operators are defined. 
There the standard only speaks of pointers to the same array object. In my humble opinion, speaking of different arrays for each dimension of a multidimensional array is misleading. Philosophically speaking isn't an element of an array object which is a subaggregate of a multidimensional array object also an element of the multidimensional array object? If so, then two elements e1,e2 of two different array objects a1,a2 which are subaggregates of the same multidimensional array object x are also two elements of the same multidimensional array object x. Then two pointers p1,p2 pointing to the elements e1,e2 also point to different array objects a1,a2 and simultaneously to the same array object x. Thus, same and different become superfluous and confuse more than help. The overall feeling regarding the wording of the C11 standard is unsatisfactory w. r. t. the presented problem. Since several people already stumbled across this, the question which is left is: Why not make the wording more precise? The takeaway message is that pointer arithmetic is only defined for pointers pointing into array objects or one past the last element. Comparing pointers for equality is defined if both pointers are derived from the same (multidimensional) array object. Thus, if two pointers point to different array objects, then these array objects must be subaggregates of the same multidimensional array object in order to compare them. Otherwise this leads to undefined behavior. If you are interested in related work I can recommend this one: Clarifying the C memory object model (n2012) If we lookup the C11 standard and read about pointer arithmetics and comparison we find exceptions for pointers which point one past the last element of an array all over the place. 
Assume it wouldn't be allowed to compare two pointers derived from the same array object where at least one pointer points one element past of the array, then code like this const int num = 64; int x[num]; for (int *i = x; i < &x[num]; ++i) { /* ... */ } would not work. Via the loop we iterate over the array x consisting of 64 elements, i.e., the loop body should be evaluated exactly 64 times. However, the loop condition gets evaluated 65 times—once more than we have array elements. In the first 64 evaluations, the pointer i always points into the array x whereas the expression &x[num] always points one element past the array. In the 65th iteration the pointer i also points one element past the array x rendering the condition of the loop false. This is a convenient way to iterate over an array which makes the exception for arrays feasible. Note, the standard only defines the behavior of comparing such pointer—dereferencing pointer is another topic. Can we change the example such that no pointer points one past the last element of array x? Well, the solution to that is not straight forward. We have to change the loop condition and also make sure that at the end of the loop we do not increment i anymore. const int num = 64; int x[num]; for (int *i = x; i <= &x[num-1]; ++i) { /* ... */ if (i == &x[num-1]) break; } This code is rather cluttered with technical details which we do not want to deal with and which distracts us from the actual job we want to accomplish. Despite that it also contains one additional branch inside the loop body. Hence, I think it is reasonable to have exceptions for pointers one past the last element of an array. Note by PVS-Studio team When developing the PVS-Studio code analyzer, we occasionally have to deal with subtle moments to make diagnostics more accurate or to provide detailed advices to our clients. This article seemed interesting for us, as it touches upon issues in which we do not feel confident enough. 
Therefore, we asked the author for a permission to publish this article and its translation on the website. We hope that by doing this, more C and C++ developers will be acquainted with it. We also expect them to become aware of the fact that not everything is so simple and when the analyzer happens to generate a strange warning, it should not be immediately taken as a false positive :). The article was first published on stefansf.de. The original and the translation are published on our website with the permission of the ...
https://www.viva64.com/en/b/0576/
CC-MAIN-2019-09
refinedweb
2,455
53.71
Assumptions: This article assumes you know about the neat little trick of using Vue as an event bus to communicate between your components. If not, read the article Creating a Global Event Bus with Vue.js by Joshua Bemenderfer. Why? I started off using the EventBus in my components and the implementation looked like this, mounted() { EventBus.$on('some-event', this.handleEvent) }, destroyed() { EventBus.$off('some-event', this.handleEvent) } This got a little cluttered as the number of events increased, and the worst part? You can forget to turn off the events. Solution The two solutions that came to my mind were: - A mixin to auto-destroy events - A dummy component that handles the mounted and destroyed logic. Personally, I don't like mixins, which is why I didn't even think twice about option 1. The dummy component seemed more fun to write. Additionally, event listeners in HTML templates looked a lot better than their JS counterpart (and it conforms to the idea behind them). This is what the component for the EventBus looks like, import _ from 'lodash' import { EventBus } from '../common/eventBus.js' export default { data() { return { events: Object.keys(this.$listeners), methods: _.mapValues(this.$listeners, (value, key) => this.handleEvent.bind(this, key)) } }, mounted() { this.events.forEach(event => { this.methods[event].SOMETHING = event EventBus.$on(event, this.methods[event]) }) }, destroyed() { this.events.forEach(event => { EventBus.$off(event, this.methods[event]) }) }, methods: { handleEvent(name, ...args) { this.$emit(name, ...args) } } } A demo is available online (the link was lost in extraction). Footnotes This is my first ever post on any platform so I am looking forward to your comments. Discussion
https://dev.to/basitali/renderless-eventbus-component-for-vuejs-jpg
CC-MAIN-2020-50
refinedweb
260
52.46
Routes In Ember, routes determine what code is executed and which view is displayed. One thing to note is that Ember has only one router, i.e., all requests are sent to the router (router.js), which determines which route to call. How to Create an Ember Route ember generate route name --pod The above will create two files, route.js and template.hbs, inside the name folder. The route.js file code is shown below : import Ember from 'ember'; export default Ember.Route.extend({ }); The router.js will be updated with the following code : import Ember from 'ember'; import config from './config/environment'; const Router = Ember.Router.extend({ location: config.locationType }); Router.map(function() { this.route('name'); }); export default Router; SubRoutes Ember also has the concept of a subroute, which is nothing but a route inside a route — or simply a child route. If we need to create a subroute for our name route, then we will run the command below ember generate route name/childname --pod After this, our router.js will be automatically updated with the new code as shown below, where it has added childname as a route under name. Router.map(function() { this.route('name', function() { this.route('childname'); }); }); Now you can see a childname folder inside the name folder, which also has route.js and template.hbs as shown below. Dynamic Routes A dynamic route is a portion of a URL that starts with a : and is followed by an identifier. To create a dynamic route, you need to create a route with a parameter as shown below, where id becomes the name of the variable that will be passed to the model to get that specific value. this.route('childname', {path: ':id'}); Redirecting to a New Route If you need to send someone to a new route, you can use transitionTo, which reads the routes table and determines the appropriate URL.
The code to route is shown below this.transitionTo('name') // for route or this.transitionTo('name.childname') //for subroute or this.transitionTo('name.childname', model) // for dynamic routes Please Like and Share the CodingDefined Blog, if you find it interesting and helpful.
https://www.codingdefined.com/2016/07/getting-started-with-routing-in-emberjs.html
CC-MAIN-2020-05
refinedweb
354
59.09
We override the equals() method in Java to check if two objects are equal. Before overriding the equals() method in Java, first let's see when two objects are considered to be equal. Two objects are considered to be equal when they are identical (contain the same data) or, in other words, when they are in the same state. In order to compare two objects for equality, we need to override the equals() method, because the version originally defined in the Object class takes a parameter of type Object and compares it with the this reference. It does not make a member-level comparison. The following is the code of the equals() method from the Object class. The above implementation of equals() will only return true if two references point to the same object in memory, because it compares memory locations with the == operator rather than comparing contents. If two objects having the same data are stored at different locations in memory, then the above implementation will return false. So, when we define our own data types (classes), we need to override equals(). Java's convention is that equals() must be an equivalence relation. Therefore, an overridden equals() method must have the following properties: Reflexive: x.equals(x) is true. Symmetric: x.equals(y) is true if and only if y.equals(x) is true. Transitive: if x.equals(y) and y.equals(z) are true, then so is x.equals(z). In addition to the above rules, equals() must take an Object as its argument and satisfy the following properties. Consistent: multiple invocations of x.equals(y) consistently return the same value, provided neither object is modified. Non-nullity: x.equals(null) returns false.
By following above rules, let's implement equals() method for the class Person as follows: class Person { private String fname; private String lname; private int age; public Person(String fname, String lname, int age) { this.fname = fname; this.lname = lname; this.age = age; } //Overriding equals public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (this.getClass() != obj.getClass()) return false; Person that = (Person) obj; if (this.age != that.age) return false; if (!this.fname.equals(that.fname)) return false; if (!this.lname.equals(that.lname)) return false; return true; } } public class EqualsDemo { public static void main(String str[]) { EqualsDemo.java D:\JavaPrograms>java EqualsDemo true true true In above piece of code class Person has an overridden equals() method, which took the following step-by-step approach: true. This test saves the work of doing all the other checks in this case. null, return false. false. To determine an object's class, we use getClass(). Note that we can use == to tell us whether two objects of type Classare equal because getClass()is guaranteed to return the same reference for all objects in any given class. Objectto Person(this cast must succeed because of the previous test). falseif any instance variables do not match. Once you have overridden equals(), you have made your class comparable. Hope you have enjoyed reading how to override equals method
http://cs-fundamentals.com/tech-interview/java/how-to-override-equals-method.php
CC-MAIN-2018-17
refinedweb
492
57.47
ADO.NET provider 5.6.0.0 for Firebird is ready (with .NET Core support) New version 5.6.0.0 of ADO.NET provider for Firebird is ready for download. There’s really only one new feature in this release – .NET Core support! The port was sponsored by Integrative, so pay them a visit on the website. Right now the target is netstandard1.6. So you can use it on .NET Core 1.0 ( netcoreapp1.0) and .NET Framework 4.6.3 (when it’s available). Every feature, including the Embedded mode (whoa), is supported except for Schema namespace. I’m waiting for netstandard2.0 (probably) when the .NET Core will have clear(er) vision around that. Next big stop? Entity Framework Core 1.0/1.1. Overview of changes can be found in tracker. You can get the bits from NuGet FirebirdSql.Data.FirebirdClient and EntityFramework.Firebird (or from firebirdsql.org).
https://www.tabsoverspaces.com/233580-ado-net-provider-5-6-0-0-for-firebird-is-ready-with-net-core-support
CC-MAIN-2022-33
refinedweb
153
74.25
Configuring With solano.yml¶ Note For quick-start configuration examples for specific languages, head over to Language-Specific Guides. Read on for examples of other scenarios, like controlling which databases to start, setting environment variables, and setting timeouts. Punctuation and spacing are meaningful in YAML. You should check that your YAML is valid and parses as expected. For instance, you can test your YAML online or with import yaml; print yaml.load(foo) in Python. Solano CI will look for a solano.yml file, either at the root of your repository or in a config/ subdirectory, and load settings from the “tddium” section. A complete reference is available. You can use this configuration file to control: - The setup procedure Solano CI will use to install, configure, and boot your app. - The tests (and other build steps) that Solano CI will run, either as list of glob patterns used to select or exclude tests to run by default (e.g., in CI) or as a list of commands. - The service subsystems (like Postgres, Memcache, Sphinx, Solr, or ElasticSearch) that are started for your tests. This control is mostly useful if Solano CI can’t properly guess what you’ll need running. A sample solano.yml for a Ruby project is shown below: # config/solano.yml --- test_pattern: - features/*.feature - features/api/*.feature - spec/**/*_spec.rb - test/**/*_test.rb postgresql: version: '9.1' mysql: version: '5.5' sqlite: false phantomjs: version: '1.8.1' This configuration tells Solano CI to: - Select cucumber features in the features directory and the features/api subdirectory, all specs and all test unit tests. In this example, there’s a features/paid_api directory that contains tests not meant for CI. - Enable the Postgres subsystem, even if the “pg” gem is not in the Gemfile. - Disable the Sqlite subsystem even if the “mysql” gem is in the Gemfile. 
If a subsystem is not mentioned in the configuration file, Solano CI will probe for a dependency in your code (e.g., a Gemfile, requirements.txt, or packages.json) and enable the subsystem if and only if the corresponding gem is found. If the subsystem is mentioned in the configuration and the value in the configuration is “false”, the subsystem will not be enabled even if your code lists a dependency. If the value is any value that evaluates to true, then the subsystem will be enabled unconditionally. Some subsystems, such as the Postgres subsystem, allow you to specify a string value that specifies the version to start. See the documentation for each subsystem for further details. Setting Environment Variables¶ Solano CI sets a number of environment variables for you automatically. A complete list of environment variables set by Solano CI is available here. You can also set environment variables in your build environment. For non-sensitive values, we recommend adding key-value pairs directly to your configuration file. You can also use so-called config variables to store sensitive values such as API keys securely. More information on both methods is available here.
http://docs.solanolabs.com/RunningBuild/configuring-with-solanoyml/
CC-MAIN-2017-26
refinedweb
503
59.19
First - a little context. WebMatrix data access is based on a library called WebMatrix.Data. It was originally called Microsoft.Data when the whole thing was in Beta. The main feature of WebMatrix.Data is that it works with raw SQL. Depending on which side of the fence you peer over, this is both a "good thing" and a "bad thing". One of the core principals behind the design of the Web Pages framework was to keep the "concept count" low. Microsoft knew through research that ASP.NET could be a forbidding experience to people who know little about server side web development. In the Web Forms framework, there are plenty of new things to learn which are foreign to all other frameworks, such as ViewState, the Page Lifecycle, an eventing model and so on. MVC is an advanced architecture designed for ease of testability, separation of concerns and what have you, which takes a lot of learning if all your sole programming experience up to now has been the odd function in Excel. If you look at classic ASP and PHP, for example, you get a totally different experience. At the simplest level, data access is often performed through direct calls to the database within code files which show a mixture of programming logic and HTML, and for relatively simple sites, this is acceptable. There are vast numbers of "simple" sites out there, and huge numbers of people who only ever want to build such simple sites. They do not need the architectural embellishments that come with MVC. Not every site is a "software engineering project". But the key thing is that if you are just learning how to build dynamic web sites, finding help on SQL is simple. There are millions of examples of SQL all over the web. On the other hand, complex sites (applications, really) do need to be carefully architected. A lot of bloggers work full-time on such projects, and know the pitfalls of intermixing logic with HTML and inlining SQL in these kinds of projects. 
They know, for example, that they do not want to have to change code all over the application if a field name changes in the database. And they know that applying automated tests to this type of project is nearly impossible. Some of these people have forgotten (it seems to me) about the other side of web development, or what it was like when they first started out. They think that every web site is indeed a software engineering project, and all should follow "best practice", although what that is depends on your point of view. It's generally agreed, though, that having inline SQL in these types of projects is not a best practice. The three projects I looked at are: - WebMatrix.Data.StronglyTyped by Jeremy Skinner - Massive by Rob Conery - Simple.Data from Mark Rendle As of now, they are at varying levels of completeness. But they are all available via the Web Pages Package Manager. If you want to play with them yourself, you need to make sure that you choose "Default (All)" as your Source option. It would be interesting to see if any of the packages make it into the Default listing at some stage.... Each of the packages offers different solutions to different problems. Consequently, each one of them has their own set of features, and either embraces WebMatrix.Data, or replaces it completely. I'll look at Jeremy Skinner's library first. WebMatrix.Data.StronglyTyped The concern that Jeremy primarily wanted to address was that WebMatrix.Data returns a Dynamic object. The dynamic type is new in C# 4.0, and offers some huge benefits, but has some downsides. For one thing, you lose any compile time checking and IntelliSense. For most WebMatrix users, this won't be an issue. WebMatrix itself offers no features in this area (in v 1.0), but if you decide to use the Launch button in the main menu to open your site in Visual Web Developer or Visual Studio, a new world of information awaits you. 
As you hover over objects in code, tooltips give you more information about them, and dropdowns appear which allow you to pick properties and methods which the object is known to expose. When you run a query using WebMatrix.Data, objects are created dynamically from the resulting data. The object is populated with properties which are generated from the schema information that comes with the returned data. This is great, because you can treat database field names as if they are actually properties on a type. But since the properties don't exist until the database call has been made, when you are typing code, no help is available to tell you if the "property" you are currently referencing in your code will actually exist at runtime. Here's a typical WebMatrix.Data example: @{ var db = Database.Open("Northwind"); var sql = "Select * From Products Where CategoryId = @0 Order By ProductName"; var products = db.Query(sql, 4); } @foreach(var product in products){ @product.ProductName<br /> } There is a field in the Products table in the database called "ProductName", but when you type product.ProductName, WebMatrix (or the C# compiler, really) doesn't know this. You could just as easily type product.ProductsName (notice the extra "s"), and you wouldn't be told anything was wrong until you ran the page. All you are told at the time you type (in VS) is that ProductsName is a dynamic expression which will be resolved at runtime. WebMatrix.Data.StronglyTyped is a wrapper around WebMatrix.Data which enables you to convert the dynamic type to a strongly typed object. You need to define your strongly typed object in a class, but that is simple. 
Here's one that shows a Product class definition to match the database table: using System; /// <summary> /// Summary description for Product /// </summary>; } } Now when you use WebMatrix.Data.StronglyTyped, you can project the result of the query into a series of Product objects: @{ var db = Database.Open("Northwind"); var products = db.Query<Product>("Select * From Products Where CategoryID = @0", 4); } @foreach(var product in products){ @product.ProductName<br /> } As you type the dot after product, IntelliSense provides a list of properties you have defined in the class that you may use. No more runtime errors resulting from typos. But there is another thing this library helps with. Let"s assume that you want to flag up all products that have fewer units in stock than their reorder level. Using WebMatrix.Data, there are a couple of ways to do this. You could change the SQL to perform a calculation on the field values and return an additional column of data which would become an additional property on your dynamic object - depending on the functions that your database supports. Or you could put a conditional in your Razor code: @foreach(var product in products){ if(product.UnitsInStock < product.ReorderLevel){ @product.ProductName<br /> } } Now that's OK, but what if the condition becomes more complex, or you need to apply this test in multiple pages? Your Razor code will start to become less and less manageable. WebMatrix.Data.StronglyTyped answers this problem because it allows you to define additional properties on your object, so you can enhance your Product class with other properties:; } public bool ShouldReorder { get { return UnitsInStock < ReorderLevel ? true : false; } } When Product objects are created, they now have a ShouldReorder property which will be true or false, depending on the comparison between two other fields. And of course, that property appears in IntelliSense. 
If, after a while, you also want to take account of the number of units on order to determine whether the product has reached the point where a new order should be made, you only need to alter your code in one place. WebMatrix.Data.StronglyTyped doesn't alter the fact that you are using WebMatrix.Data. You don't really need to learn anything new to use it. You still use SQL and the standard WebMatrix.Data.Database methods. That means that relationships can be navigated using familiar Joins in SQL. Notice that the Product class has a Category property? That is defined in a separate class: /// <summary> /// Summary description for ClassName /// </summary> public class Category { public int CategoryID { get; set; } public string CategoryName { get; set; } public string Description { get; set; } public byte[] Image { get; set; } public int TotalProducts { get; set; } } This mirrors the Categories table in the database, but features an additional property called TotalProducts. If you wanted to get the total number of products by category, you simply do this: @{ var db = Database.Open("Northwind"); var categories = db.Query<Category>(@"SELECT CategoryName, Count(Products.CategoryID) AS TotalProducts FROM Products INNER JOIN Categories ON Categories.CategoryID = Products.CategoryID GROUP BY CategoryName"); } Writing the results to the browser is equally simple: @foreach(var category in categories){ @:@category.CategoryName (@category.TotalProducts)<br /> } Massive Rob Conery has approached things slightly differently. The Massive library (it's one C# code file) doesn't use WebMatrix.Data. It replaces it. It is based on the Active Record pattern, which treats the underlying database tables as representing objects with properties based on the column names. The properties on the objects are generated dynamically when data is retrieved from the database. There is no formal definition of the object as was the case for WebMatrix.Data.StronglyTyped. 
So far, there is little difference between Massive and WebMatrix in terms of how objects and properties are generated, but Massive also provides a number of methods on those objects, such as All, Insert, Update, Delete and so on, which is much more characteristic of the Active Record pattern. When using Massive, you need to define a connection string in your web.config file. The WebMatrix team have done a lot to shield beginners from web.config files, but anyone who has used one of the other flavours of ASP.NET will be familiar with them. They contain general settings and configurations for your application. When you add a new file to WebMatrix, pick the "Suggested" or "All" option and find Web.Config (4.0). This is what adding a connection string looks like. Notice that the connection string has been given a name: <?xml version="1.0"?> <configuration> <system.web> <compilation debug="true" targetFramework="4.0"/> </system.web> <connectionStrings> <add name="northwind" connectionString="Data Source=|DataDirectory|Northwind.sdf;" providerName="System.Data.SqlServerCe.4.0" /> </connectionStrings> </configuration> The |DataDirectory| part is a substitution string which ASP.NET knows points by default to the App_Data folder within a web site. You need to set up something else for Massive, and that is a definition for the objects that map to database tables. The following code maps to the Products table: using Massive; public class Products : DynamicModel { public Products() : base("northwind") { PrimaryKeyField = "ProductID"; } } This is not as involved as WebMatrix.Data.StronglyTyped. It maps the primary key field, and passes the name of the connection string to Massive. As I type, there appears to be an update to Massive which means that the connection string won't be required. Massive will look for the first connection string in your web.config file, but that change hasn't been pushed up to NuGet yet. 
Your Products class inherits from DynamicModel (that's what the colon is for), which is what actually the object that contains definitions for the Insert, All, Update etc methods. In the first example, we got all products with a CategoryID of 4 using SQL. You can do exactly the same with Massive by using its Query method: @{ var Products = new Products(); var products = Products.Query("Select * From Products Where CategoryId = @0 Order By ProductName", 4); } Alternatively, you can use The All method and pass in some optional parameters: @{ var Products = new Products(); var products = Products.All(where: "CategoryID = @0", args: 4); } This will construct the SQL dynamically, using parameters to prevent any chance of the threat of SQL injection. The All method will return the entire table, so it's the equivalent of SELECT *. But each of the methods are available through IntelliSense, which can help save some time once you have learned what the methods do and what parameters they accept. There aren't that many, so it shouldn't take long. You can always fall back on an SQL statement if you only wanted to return some columns. Again, the latest update will introduce another parameter: columns, where you can pass just the columns that you would like to return. The return type is dynamic, so you get no IntelliSense or compile time type checking in Visual Studio. If you wanted to return the total number of products in each category, SQL is probably the best way to go: @{ var Categories = new Categories(); var categories = Categories.Query(@"SELECT CategoryName, Count(Products.CategoryID) AS TotalProducts FROM Products INNER JOIN Categories ON Categories.CategoryID = Products.CategoryID GROUP BY CategoryName"); } Massive also supports transactions, which means that if you want to execute multiple commands consecutively, but only want them to succeed if all succeed, you can do so. There seems little point in using Massive if you are happy using SQL. 
Its primary attraction is the set of methods that translate to SQL commands behind the scenes. If you want to move away from using SQL directly, it's a nice little tool. Simple.Data I'm going to start off by pointing out that this library is in its early stages, having just reached version 0.5. It is not complete. The library again follows the Active Record pattern, and attempts to provide a relatively easy way for beginners to access data without plugging SQL into their code, which Mark feels is just "wrong". It seems that Mark mistakenly believed that WebMatrix.Data did not offer any support for parameters (which it does), but despite that, it is possible to use WebMatrix in an unsafe way if you concatenate user input directly into your SQL. The result of Mark's work is a separate data access framework, which like Rob's doesn't rely on WebMatrix.Data. Simple.Data works with dynamic types just like Massive and WebMatrix.Data. It is likely to appeal more to people who migrate from PHP to ASP.NET, in that they may be more familiar with the Active Record pattern and how it works. There are no options to fall back on SQL. Simple.Data supports a growing range of databases, not just SQL Server and SQL CE, and as a lightweight data access framework, it holds a fair amount of promise. I feel though that it is unlikely to appeal to people who know nothing about databases and SQL. Simple.Data needs a connection string.
Again, this can be defined in web.config, but must have a specific name (at least, at the moment): <?xml version="1.0"?> <configuration> <system.web> <compilation debug="true" targetFramework="4.0"/> </system.web> <connectionStrings> <add name="Simple.Data.Properties.Settings.DefaultConnectionString" connectionString="Data Source=<full path to db>\Northwind.sdf;" providerName="System.Data.SqlServerCe.4.0"/> </connectionStrings> </configuration> Alternatively, you can provide the connection string in the OpenFile method: @using SD = Simple.Data; @{ var db = SD.Database.OpenFile(Server.MapPath("~/App_Data/Northwind.sdf")); } If you are using WebMatrix, WebMatrix.Data is referenced by default. That introduces a bit of a problem in that Simple.Data also defines a Database class. Therefore, any reference to the Database class will be ambiguous. The easiest way to resolve this is to reference Simple.Data using an alias. I've used SD as my alias for Simple.Data. You could of course simply use the fully qualified namespace: Simple.Data.Database.OpenFile() but that can become boring to type after a while. Beyond that, no configuration (class definition files) is required. The Database class has an All method, just like Massive, but it also has a variety of Find methods: FindAll, FindBy, FindAllBy. The last of these allows you to append the field name which acts as a filter, and pass in the value. It is in this way that Simple.Data helps to avoid any SQL injection problems. Here's one way to get all products with a CategoryID of 4: @{ var db = SD.Database.OpenFile(Server.MapPath("~/App_Data/Northwind.sdf")); var products = db.Products.FindAllByCategoryID(4); } @foreach(var product in products){ @product.ProductName<br /> } Simple.Data also features a number of other commands for inserting, deleting and updating data. It is also capable of traversing relationships which are defined by foreign keys.
Getting the count of products in each category is not as clear cut as it would be using SQL, and I found that the only way I could work out how to do this is not good on performance: @{ var categories = new List<string>(); products = db.Products.All(); foreach(var product in products){ foreach (var category in product.Categories){ categories.Add(category.CategoryName); } } var result = categories.GroupBy(c => c).Select(g => new { Category = g.Key, Count = g.Count() }); foreach(var item in result){ @:@item.Category (@item.Count)<br /> } } I hope I've got this badly wrong, because what happens is that all products from the Products table are retrieved, and then the Category Name associated with the Product's CategoryID is retrieved within a loop, meaning that a new database query is fired for every product. All of the Category names are loaded in a List, and then that's subjected to some Linq to obtain the de-duplicated category names along with the total for each one. So which should you use? If you are just starting out, I would strongly recommend that you use the standard WebMatrix.Data library. WebMatrix.Data.StronglyTyped certainly has a place if you want to move towards building your application around objects. Massive is a nice little utility but seems to be more of an experiment by the author around the whole C# dynamics thing. It will be interesting to see if and how it develops. Finally, Simple.Data promises a lot, but is not ready for Prime Time yet. According to Mark Rendle's blog, Simple.Data appears to have started out as a proof of concept and probably wasn't intended to grow as much as it has. A huge amount of work has already gone into Simple.Data, and it will be interesting again to see if this is developed further and in what way. A download containing samples for each library, including WebMatrix.Data is available here
https://www.mikesdotnetting.com/article/172/data-access-choices-for-webmatrix
CC-MAIN-2020-40
refinedweb
3,089
56.45
The Action tag is used to call an action class directly from a JSP page. We can call the action directly by specifying the action name and an optional namespace. The body content of the tag is used to render the results from the Action. The following example shows how to implement the Action tag in Struts 2.2.1 -- First we create a JSP file named ActionTag.jsp as follows. If executeResult="true" is specified in the action tag, the method is executed and the result page will be displayed directly; otherwise, it just executes the method and no result page will be displayed. The index.jsp file is as follows- This file contains only the hyperlink. The Struts mapping file Struts.xml is as follows- The action class ActionTag.java is as follows. This program produces output on the basis of the Action tag evaluation. This gives the output as- Output:- Download Select Source Code Advertisements Ads Ads
https://www.roseindia.net/struts/struts/struts2.2.1/tags/Actiontag.html
CC-MAIN-2021-04
refinedweb
160
67.96
the program asks for an amount of money then gives back the smallest number of coins needed to equal that amount of money. In order to convert my input from type double to type int without losing the info to the right of the decimal, I first multiply by 100, then do my conversion. However, certain numbers give back incorrect answers. 0.07 should equal 7.00 but doesn't. Also 2100.01 gives the wrong answer; any 2100 number with an odd hundredths digit is wrong. Here is the isolated code. import java.util.Scanner; public class coinsMultiplyError { public static void main(String[] args) { Scanner keyboard = new Scanner(System.in); System.out.println("Please enter amount of monney"); double total = keyboard.nextDouble();//input from user total=total*100; System.out.println(total); } } Is there any other way to isolate the decimal numbers and convert to int? I tried keyboard.useDeliniator(".") as a substitute for using a space between the dollars and change. It would compile but then it would yell at me when I tried to run it.
http://www.dreamincode.net/forums/topic/63256-type-double-multiplication-errors/
CC-MAIN-2016-30
refinedweb
179
68.36
The COVID-19 pandemic has changed the way we interact with people, things, and the world around us. Social distance enablement is one and only effective method to prevent the spreading of COVID-19. The whole developers in the world are trying to create software or hardware related products to wipe out the spreading of disease. Here we are dealing with the Physical queues. So physical queues are one of the major threats for the social distancing act. Being in a physical queue there are huge chances of spreading the disease. In a certain queue, we need to receive physical tokens from the counter that will also increase the chances of getting the disease. Peoples hate nothing more than standing in queues, uncertain when their turn will come to receive their service. Long queues will probably result in tangles. So maintaining a physical queue is actually a big risk for the service provider(The authority who provides services) and also for the peoples who need to be in the queue. The virtual queue and tokens are an effective way to handle the queue these days. But most of the virtual queue needs a dedicated complex software and the internet connection. Poor internet connection will probably bring you to the last in a queue. For handling the complex software the person needs little skills. So here we are introducing the Queue Management Node(QMN) which is a simple user-friendly smart queue maker powered by Arduino MKR WiFI 1010. Let's watch the video of QMN.How Does It Work? Queue Management Node(QMN) is the device that is creating smart tokens. For creating the smart tokens, the person should be in the wifi range of Arduino MKR 1010. The person also needs a smartphone to get the process done. The workflow will go as follows - A WI-FI access point will be created by Arduino MKR 1010. - The person who wants token needs to connect the phone to the access point & that will be redirected to the localhost. - On that page, the person needs to enter his/her phone number. 
At that instant, OTP will be sent to the concerned number to verify it. The phone number is taken intentionally to give notification. - After verifying the phone number, the token will be displayed on the localhost. - When his/her turn comes the device(QMN) will send a message notification to the concerned person to take their turn. This device is actually receiving the request from peoples and giving them the smart tokens. For sending the message we are using a Twilio SMS API in the QMN device. The notification of Turn can be sent by pressing the button in the QMN. When all the tokens are called out, you can clear the memmory by pressing the reset button on Arduino MKR WiFi 1010.User Interface When you connected to the AP, you will be redirected to the page like this After submitting the phone number, you will get an OTP on that number. Then it shows the OTP page to enter your OTP number. When you submit the correct OTP, you will get the token on this token page If you entered the wrong OTP, it will show like this If your number had received the token already, it will load this page That's all about the User InterfacePossible Use Cases - Hospitals - Shops, Hotels - No internet connection required. - A simple user-friendly web interface. - Native device notification, when the turn comes. - No physical tokens. - Easy to implement. - No unnecessary waiting time, show up when your turn comes. The brain of the QMN is Arduino MKR WiFi 1010. It is the easiest point of entry to basic IoT and pico-network application design.. This QMN completely relies on the WiFi connectivity of the Arduino MKR WiFi 1010. The device uses both AP(Acces Point) mode and STA(Station) mode of wifi module. The device will intelligently switch between these modes for the proper operation of this device.Arduino IDE The Arduino IDE is used here for programming the Arduino MKR WiFI 1010. Please have a look here for getting started with the device. 
Use the latest Arduino IDE for programming the Arduino MKR wifi 1010. Before going into programming check if there is any latest firmware update for the device. Please have a look here to know how to update the firmware.Captive Portal We are actually creating an access point (AP) by Arduino MKR WiFI 1010, any device(mobile) can be connected to this AP. For going into the web interface in past, person needs to type the IP address or hostname in the browser. That's almost okay, but the user needs to manually put the IP or hostname on the browser. That is really weird thing. But in this case, the device which is connecting to the QMN will automatically be redirected to the web interface via the Captive Portal. Here the Captive Portal plays a big role in reducing the effort of the user. There are a lot of Captive Portal projects with Espressif devices, unfortunately there are none with the NINA library. Because the MKR WiFi 1010 uses the NINA library. At last, I found a project in Arduino hub which uses Captive Portal as the key things by JayV Then I began my project by taking it as the base code. It is almost working fine. What we are actually doing is that we are setting DNS and to own Access Point(AP) - IP address and checking first (16) DNS requests via UDP port 53. After checking the first 16 requests we will send a response for the DNS requests with the redirected IP address of own Access Point. Then the phone will automatically load the web interface through the web browsers. The final effect will be like this when a device connected to the specified AP, the phone will automatically load the web interface. UDP server and Webserver work both at the same time. The web server is a simple main page with a form button for entering the phone number.Twilio & Things speak Unfortunately, I don't have a GSM module to send the messages. For sending the OTP and device notification we need to use any SMS API. 
So in this project, I used Twilio's SMS API for accomplishing the task. As we know that, for the API to work we need to give the HTTP request for the server. First I gave normal HTTP request without any encryption to the Twilio, but the Twilio didn't consider my request. They need SSL fingerprints to ensure security. I didn't see any functions in NINA libraries which support these SSL's. So I used Thingsspeak to trigger the Twilio. For using these services you need to register in both the platforms. In Twilio create a new number and that will be the number in which you sent the data. You will get free credit in Twilio for messaging. For the trial account, you need to verify the numbers in which you want to send the data. Go to Thingspeak.com, click on apps, then ThingHTTP, and then New ThingHTTP. This will take you to the setup page. You will have to find your Twilio account SID and auth token on your Twilio dashboard page. -. The API key of the ThingHTTp should be included in the Arduino Sketch.AP OR STA Mode All the Arduino boards having Nina module do one role at a time i.e Station mode or Access Point mode. We need to constantly switch between these modes to get the job done. First, the QMN will be in AP mode after getting the number it will switch to the STA mode for sending the OTP. After sending the OTP the QMN will switch back to AP mode. If a person triggered the push button, the QMN will switch to STA mode for giving SMS notification. After that, it will come back to AP mode. For giving the internet connection we are switching the QMN for STA mode. The SMS API requires internet connection.TM1637 4 Bits Digital Tube LED Display & Push Button The TM1637 4 Bits Digital Tube LED Display Module is an affordable solution for displaying the output data of your embedded project. Though the data displayed is restricted by numbers still it allows users to display some characters too like A, B, C etc. 
The current token number which will be running is displayed on this 4-bits seven-segment LED. This 7 segment LED Display has 4 digits which are controlled by the TM1637 Driver Chip. It requires only two connections to control this TM1637 4 Bits Digital Tube LED Display Module. By looking at this display anyone can easily understand the Token number. That's the real use of this device. You need a library called TM1637Display.h for working with this module. Just download the library from here. Here the push button is used for calling the tokens. I have used the push-button module so it is very easy to integrate. Here the push button is in the pull-down mode. You can easily make a module with a resistor and the push button.Circuit The circuit is very simple, it doesn't consist of any complex hardware. First I made the circuit on the breadboard. There is only one GND and 5V in Arduino. So we need to give it on the rail for making more connections.Case I got this case from a local store. I just cut a small piece in the front for showing the seven-segment LED that displays the token. I have also cut two pieces from the side: one is for the push button and the other is for the USB cable, for giving power to the Node. This case fits very well; all the components are placed very well. The final outlook of the device... All the HTML pages shown in the interface are stored in the flash memory of Arduino MKR WiFi 1010. For storing that I used the PROGMEM utility. PROGMEM is part of the pgmspace.h library. It is included automatically in modern versions of the IDE. However, if you are using an IDE version below 1.0 (2011), you’ll first need to include the library at the top of your sketch, like this: #include <avr/pgmspace.h> While PROGMEM could be used on a single variable, it is really only worth the fuss if you have a larger block of data that needs to be stored, which is usually easiest in an array. We have a large block of data here so we are going for this.
All the HTML files are stored in "source.h" tab. If you want to load your own web page in Arduino, just refer below code. const char page[] PROGMEM = R"=====( <html> --Your wep page content </html> )====="; You need to call this page on the main programme like this client.print(page) For put the QMN in AP, and activating the server we are just using these functions WiFi.config(apip, apip, gwip, IPAddress(255, 255, 255, 0)); WiFi.beginAP(ssid_ap,pass_ap, apChannel); server.begin(); Actually we are configuring web server with constant AP IP address, gateway IP and subnet mask. When we submit the phone number on the index page the data will be stored to the "number" variable. This is done by means of this piece of code if (currentLine.endsWith("POST /checkpass.php")) { } After it will load the OTP page. An OTP will be generated by the random function random(min, max); That will be sent to the phone number by making a TCP connection to the remote host(Thing speak). For making the HTTP request we need to connect the QMN to internet. ie, To switch to STA mode. For switching the STA mode we are using the function. WiFi.begin(ssid_sta,pass_sta); When we switching between these modes use WiFi.end() That is for clearing wifi memory. Otherwise, it will affect your program. When the user enters the OTP received, the programme checks whether the submitted OTP and generated OTP are the same by means of this condition if( otp_gen == otp_sub) { } When they are same it will be added to the Queue array. Actually we are adding that number to the array. The person with that number will be in the queue of the array. queue[i] = number; Then we clear all those variables to receive a fresh value. number = ""; otp_gen = ""; otp_sub = ""; when the user enters the invalid OTP, error page will be loaded. If a number is already in the Queue and that is requesting for another token the device will flag that number and the corresponding page will be loaded. 
This is done by means of checking all array elements with this loop. for (x=0; x<2; x=x+1) { if (queue[x] == number) { return 0; } } This condition is only executed when there is a valid OTP. If the push button is triggered, the QMN will call the token according to the queue array. That is the First In First Out (FIFO) method.What in the future? - Switching of modes can be avoided by using the GSM Module - Token Forwarding Facility - Facility for recall, that is, the admin can recall the persons whose turn is over.
https://www.hackster.io/Mudd11/queue-management-node-qmn-33578a
CC-MAIN-2021-04
refinedweb
2,229
73.98
The following code converts the float value 7.5 to the int value 7: int main() { int i; float f = 7.5; i = (int) f; // Valid in C/C++ } In C++: i = int (f); // Works in C++ but not C i = int (f); is valid in C++ but not in C. From the C99 Standard, 6.5.4 Cast operators cast-expression: unary-expression ( type-name ) cast-expression C++ supports the above form of casting as well as function style casting. Function style casting is like calling the constructor of a type to construct an object. Check out the following block of code compiled using C and C++. #include <stdio.h> int main() { float f = 10.2; int i = int(f); printf("i: %d\n", i); } Non-working C program: Working C++ program:
https://codedump.io/share/gw2Z9WchQcWe/1/explicit-type-casting-operator-in-cc
CC-MAIN-2017-17
refinedweb
131
75.4
i am student, i want to find one app about english dictionary by j2me! please help me if you have source dictionary by j2me, please send it to my gmail thuhuongyb@gmail.com thank you too much help me, I just copy above code in java SDK 3.0 and use ClamshellCldcPhone1 emulator and nothing happen, emulator just show black screen. Post your Comment What is a vector in Java? Explain with example. What is a vector in Java? Explain with example. What is a vector in Java? Explain with example. Hi, The Vector is a collect of Object... when required. There are 2 types of Vector i.e. synchronized and Vector contains Vector Iterator Java Example interface can traverse all its elements. Java Vector Iterator with Example import java.util.Iterator; import java.util.Vector; public class vector... Vector is a collection class.It works similar to the Array. It has Java Vector Java Vector In this tutorial, you will learn about vector and its' implementation with example. Vector is alike to ArrayList , it is also dynamic... In the below example, you will see vector and different methods implementation Vector in java . Example : The following program to illustrate the use of vector: import...Vector in java Vector in java implements dynamic array. It is similar to array and the component of vector is accessed by using integer index. Size J2ME Vector Example J2ME Vector Example This application illustrates how we can use Vector class. In this example we are using the vector class in the canvas form. The vector class Vector in Java . Vector java Example: package Toturial; import java.util.Iterator; import...Vector in Java are array lists that are used instead of arrays, as they have... data even after the Vector has been created. Capacity of vector is always Java : Vector Example Java : Vector Example This segment of tutorial illustrates about the Vector class and its use with the Iterator interface. 
Java Vector : Vector class...(it.next()); } } } Description : In this example we are using vector to store Java Vector in this vector. Understand Vector by Example- This java example shows us... Java Vector Introduction to Vector Vector are array list with extended properties which Java Vector Iterator all its elements. Java Vector Iterator Example import java.util.Iterator... Java Vector Iterator is a Collection class. It has similar functionality...(String[] args) { Vector v = new Vector(); String tree[] = { "olive", "oak Convert Array to Vector Convert Array to Vector In this section, you will learn to convert an Array to Vector... into a Vector. Here we have taken an array of data which gets converted to data Convert Vector to Array Convert Vector to Array In this section, you will learn to convert Vector to an Array. Code Description: This program helps you in converting Vector to an Array. Here Sorting Vector Element using I18N Sorting Vector Element using I18N This Example shows you how to sort vector element using I18N.... Methods used in this example are described below : Vector.add() : Vector is like Java Vector in the vector: public int lastindexof(obj abc) Following is the Example of Java... to them. They also shrink when data is removed from them. The Vector class... are added to Vector during runtime. Items can be removed even after the Vector Vector Example in java Vector Example in java In this example we are going to show the use.... In this example we are using seven methods of a Vector class. add(Object o): It adds arraylist and vector arraylist and vector what is the diff b/w arraylist and vector, Java collection Vector Java collection Vector How can we use the vector class in java program? The vector class is similar to the ArrayList class except that the vector class is synchronized. import java.util.Iterator; import Open Source Vector to an official 1.0 release. 
An example of a commercial-standard vector graphics...Open Source Vector Open source vector graphics The open source Xara Xtreme project -- which is building a commercial-standard vector graphics share code dictionarythu huong April 2, 2011 at 10:30 AM i am student, i want to find one app about english dictionary by j2me! please help me if you have source dictionary by j2me, please send it to my gmail thuhuongyb@gmail.com thank you too much the code not workeko January 3, 2012 at 1:03 PM help me, I just copy above code in java SDK 3.0 and use ClamshellCldcPhone1 emulator and nothing happen, emulator just show black screen. Post your Comment
http://www.roseindia.net/discussion/22811-J2ME-Vector-Example.html
CC-MAIN-2014-52
refinedweb
760
58.99
What will we cover in this tutorial? How to get a list of all possible webcam resolutions of your webcam using OpenCV. This is not straight forward as this feature is not supported by the OpenCV interface. Why is this not trivial? The quick answer is, there is so many different webcams that have different drivers. That means that webcams have different features, and can do different things, support different resolutions. Hence, OpenCV has made support for the most common things. Among those are the following. - cv.CAP_PROP_FRAME_WIDTH Width of the frames in the video stream. - cv.CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream. You can see the full list here. The things you can do is to call the get and set with these parameters. The get will get the current used value and set will set it. import cv2 cap = cv2.VideoCapture(0) width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) print(width, height) Which will with my laptop print. 1280.0 720.0 Hence, my webcam has a 1280×720 pixels resolution. To figure out all possible resolutions of your webcam, you would expect to find that as a possibility. But you will be disappointed. You can’t just set it to any resolution Webcams have a fixed set of possible pixels resolutions. The default for mine is 1280×720 pixels, but that does not mean it can only use that. Unfortunately, the diversity of possibilities of resolutions is big and differentiated and not standardized. Also, you cannot just set it as you want to. Let’s try that. import cv2 cap = cv2.VideoCapture(0) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 300) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 400) width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) print(width, height) We try to set it to 300×400 pixels to see what happens. 640.0 480.0 As the above shows, it sets it to 640×480 pixels. That means it sets it to some other possible resolution if you set it to something it does not support. 
So how to find out what resolutions are supported by your webcam? Good question. I thought you would never ask. Don’t we just do it like this. for width in range(1, 15360+1): for height in range(1, 8640+1): # Try the resolution to see if it works That is we try all possibilities all the way up to the highest resolution we know of (16K). Bad news. It is pretty slow to check if a resolution works, let’s just say it can try 100 per second. That would take 15360×8640 = 132710400 iterations. With 100 per second it would take 15.36 days to try out. So a better idea is to try out all know resolutions. We find a pretty good list on wikipedia of List of common resolutions. Using Pandas read_html to get the table and run through them all can be done like this. import pandas as pd import cv2 url = "" table = pd.read_html(url)[0] table.columns = table.columns.droplevel() cap = cv2.VideoCapture(0) resolutions = {} for index, row in table[["W", "H"]].iterrows(): cap.set(cv2.CAP_PROP_FRAME_WIDTH, row["W"]) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, row["H"]) width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) resolutions[str(width)+"x"+str(height)] = "OK" print(resolutions) For my webcam it returns the following list. { '320.0x240.0': 'OK', '640.0x480.0': 'OK', '1280.0x720.0': 'OK' } And it took 8.28 seconds to try out the 193 possible resolutions from the list. That is less than 23 checks per second. Hence, the 100 checks seems to be quite optimistic. On my laptop it would take over 66 days to check them all out in the naive way.
https://www.learnpythonwithrune.org/find-all-possible-webcam-resolutions-with-opencv-in-python/
CC-MAIN-2021-25
refinedweb
626
78.65
Board index » C Language All times are UTC Eg. Suppose I have the following: char *str = "Hello Dolly \0" Is there a right trim function that will remove the trailing spaces and make *str looks like "Hello Dolly\0"? Thanks, Al /* * strrtrim.c -- remove trailing whitespace from a string * * Part of publib. See man page for more information */ #include <assert.h> #include <ctype.h> #include <string.h> #include "publib/strutil.h" char *strrtrim(char *s) { char *t; assert(s != NULL); t = strchr(s, '\0'); while (t > s && isspace(t[-1])) --t; *t = '\0'; return s; > char * Is there a right trim function that will remove the >trailing spaces and make *str looks like "Hello Dolly\0"? Dan -- Dan Pop CERN, CN Division Mail: CERN - PPE, Bat. 31 R-004, CH-1211 Geneve 23, Switzerland -- #include <standard.disclaimer> _ Kevin D Quitt 91351-4454 96.37% of all statistics are made up > Eg. Suppose I have the following: #include <ctype.h> #include <stdio.h> #include <string.h> void trim_trailing ( char *s ) { char *c; if ( s != (char *) NULL ) { c = s + strlen ( s ) - 1; while ( c >= s && isspace ( *c ) ) --c; *++c = '\0'; } /* trim.c Written by D'Arcy J.M. Cain PlanIX, Inc. trims leading and trailing spaces while copying string note that destination and source can be the same string if dest is NULL then allocate space */ #include <ctype.h> #include <stdlib.h> char * trim(char *d, const char *s) { char *dscan; const char *last; /* skip leading space */ while (isspace(*s)) s++; /* find end of string */ for (last = s; *last; last++) ; /* find last non-space character */ while (last > s && isspace(*(last - 1))) last--; /* if dest is NULL then allocate space */ if (!d && !(d = malloc((last - s) + 2))) return(NULL); dscan = d; /* copy string */ while (s < last) *dscan++ = *s++; *dscan = 0; return(d); >> Eg. Suppose I have the following: >> char *> Is there a right trim function that will remove the >>trailing spaces and make *str looks like "Hello Dolly\0"? 
> The standard C run time libraries do not have one. Here is one that > works ... this better not be solving your homework assignment. Since > it is pretty late in the semester, I will make this assumption. > #include <ctype.h> > #include <stdio.h> > #include <string.h> > void > trim_trailing ( char *s ) > { > char *c; > if ( s != (char *) NULL ) > { > c = s + strlen ( s ) - 1; > while ( c >= s && isspace ( *c ) ) > --c; > *++c = '\0'; > } > } Change it to void trim_trailing ( char *s ) { char *c; if ( s != (char *) NULL ) { c = s + strlen ( s ); while ( c > s && isspace ( *(c - 1) ) ) --c; *c = '\0'; } } -- Mike Rubenstein This does not make any sense at all! The code works just fine. If you're complaining about the fact that 'c' is being dereferenced when it is below 's,' you're worrying unnecessarily. Thanks to C's "early-out" mechanism, the dereferencing of 'c' will never happen when c < s. --- Tim However, the point is that the standard does say that one is not allowed to even generate a pointer that does not point inside an object, or one past the end. The beginning of the object does not count as an end. The following is invalid: char s[10]; if (s-1 < s) printf("s-1 < s"); else printf("s-1 >= s"); See the FAQ, 2.16 if I remember correctly. -- My name is Ozymandias, king of kings/Look on my works, ye Mighty, and despair! Mike said: How else do you search for a particular pointer in a linked list, for example. I did read K&R's ANSI definition and concur with your statement, but it then seems impossible to implement the above problem. --- != and == are called equality operators and are treated in a separate section. These operators may be used with any pointers to objects as long as the types are compatible. Also, equality operators are valid if one of the pointers points to an object and the other is null or a valid void*.
However, the equality operators cannot be used unless the pointers point to valid objects or are null, so in the case given where a pointer is decremented below the start of an array the equality operators would also be undefined. Of course this doesn't hurt comparing pointers to objects in a linked list for equality, since each is (or better be) a valid pointer or null. I can't improve on what David F. Skoll and Lars Wirzenhius said, except to suggest that you read the standard. It is very clear on this. Perhaps it will make sense to you. 1. Way to do trim right in string? 2. String Trim function 3. RTRIM$(right trim)? 4. Help needed with converting Basic TRIM and Right to C equivalents 5. Need help with string functions: LEFT, RIGHT and MID 6. Trim a comma off end of string??? 7. Help to trim spaces of a string 8. trim the string 9. String.Trim() behavior 10. String.Trim is this correct functionality 11. String.Trim(); 12. Need help to trim spaces off a string
http://computer-programming-forum.com/47-c-language/2350a88d3f080c54.htm
CC-MAIN-2017-30
refinedweb
834
84.47
By Vikas Kumar Income Tax Return Filing: The due date of filing income tax return for the previous year 2016-17 (assessment year 2017-18) is finally here. So while you are busy in collating your investment proofs and other information, you should take care to avoid the most common mistakes people make while filing the return of income. Here we attempt to highlight a few of such mistakes: Adherence to the filing timelines In general parlance, people tend to miss the return filing due dates where the entire taxes are either paid as advance tax or are already deducted by the payer of income. In such cases, interest u/s 234A on account of delay in filing the tax return does not get attracted in absence of any unpaid taxes. But there are other benefits which you will lose if you file a belated return. In case of any refund claimed in the return, the interest on such refund shall be computed by the tax department from the actual date of filing the return (belated) and not from the first day of the assessment year (i.e. April 1st). In other words, a one day delay in filing would mean losing interest on refund for at least four months (April, May, June and July). Another loss of benefit is the restriction on carry forward of losses in a belated return (except loss from house property). Reconciliation of prepaid taxes with Form 26AS A common mistake people make is not to collect and reconcile all the supporting documents before filing the tax return. Documents like Form 26AS (downloaded from income tax website), Form 16 (salary statement), Form 16A (interest certificate) should be kept handy and quickly reconciled with the computation of income for any mismatch. You can contact the payer of the income (employer, bank etc.) to obtain these documents in case you have not received the same yet. Any mismatch of prepaid taxes claimed in your return with the amount appearing in your Form 26AS may result in receipt of a demand notice from the department. 
Non-reporting of exempt income We generally give importance to computing taxable income and meanwhile forget to pay attention on exempt income disclosure. Since it is equally important to report exempt income in the return form to disclose your annual financial transactions, you should not forget to report dividend from mutual funds/ Indian company (up to Rs 10 lakhs), NRE bank interest, exempt capital gains on sale of shares/units of mutual funds etc. Also, you should keep relevant supporting documents for these exempt incomes to answer questioning (if any) from the tax department at a later stage. Watch this also: Determination of type of capital gain The capital gains are of two types – short term capital gains (STCG) and long term capital gains (LTCG). A careful examination of the type of capital gain is required before applying the applicable tax rates. Many of you must be investing in securities and incurred capital gains during the year on sale of such assets. Considering the different holding period for different securities, people tend to determine the type of capital gain incorrectly. In case of sale of listed shares/ securities, units of equity oriented mutual funds, units of UTI and Zero coupon bonds, capital gain is considered as LTCG if they are held for more than 12 months. For unlisted shares of a company, such 12 months is substituted by 24 months and for unlisted debentures/ debt oriented mutual funds, it is 36 months. Another area of common mistake which NRIs do to claim basic exemption limit of Rs 250,000 from STCG on sale of listed equity shares/ equity oriented mutual fund in India. For non-residents the basic exemption limit of Rs 250,000 does not apply against the STCG earned on sale of such shares/units. Also Watch: Computation of income from house property There are rules prescribed for computing income from house property where the individual owns multiple properties or the property is let out for a part of the year. 
It is common for people to skip the prescribed rules in this regard. In case you are living in your house for a part of the year and for the remaining part property is let out owing to your overseas assignment, you should compute taxable income on the actual rent received (for the period of actual let out) plus notional rent for the period you self-occupied the house (considering deemed let out). Also, where you have multiple properties and more than one house is self-occupied/ vacant for the whole year, only one property can be considered as self-occupied and rest all such properties to be taxed as deemed let out. You should be careful in computing such income and disclose all your properties in the tax return and not just the let out properties. Watch this video: Applicability of clubbing provisions Most of us are aware that there is no joint filing concept in India and hence, you and spouse are required to file return of income independently. We sometimes tend to over-look income which is not earned/ received by us, yet may be clubbed in our hands owing to clubbing provisions. This may apply where income is earned from an asset which is transferred by you to minor child/ spouse/ son’s wife without adequate consideration. Special attention should be paid on the computation of such clubbed income and its disclosure in the tax return to avoid any mistakes in this regard. Set off of brought forward losses We generally believe that return of income filed for one year is independent of another and hence, there is no correlation. While saying this, we tend to forget the fact that under the law, set off of brought forward loss from past year against income of current year is allowed. While filing return of income, it is always better to give a glance to the last year filed return to check if any loss is carried forward and eligible to be set-off this year. 
Quoting wrong assessment year while paying self-assessment tax It is not always necessary that tax on the income earned by you is already deducted by the payer. Then you will be required to pay due taxes through self-assessment mode before filing the return of income. It is very common to see people filling incorrect assessment year in the tax challan while depositing self-assessment tax and consequently end up paying taxes for the wrong year. To clarify this, you should remember to fill assessment year 2017-18 while preparing the challan for this year’s tax (i.e. financial year 2016-17). Also, you should check your Form 26AS around 10-15 days after depositing the taxes to confirm if the credit of such taxes appear under your PAN against the correct assessment year. Selection of incorrect return form It is seen that people generally select incorrect return form to file their return of income. If you file return using an incorrect form, it will be treated as defective return and you may receive notice from the tax authorities to rectify the same. If you have income from business or profession you can use ITR 3, elsewise, you can use return forms ITR 1 or ITR 2 depending upon the source of income, residential status, etc. You can use ITR 1 if you have income from salary, one house property (excluding loss cases) and income from other sources (excluding loss cases, winning from lottery/ race horses, dividend income from domestic company exceeding Rs 10 lakhs, unexplained cash credit/ investment/ money/ expenditure). However, if your total income exceeds Rs 50 lakh or you are required to disclose your foreign assets/income, you cannot use ITR 1. Reporting disclosures Generally people do not give much attention to disclosure requirements in the return form. Either they leave the required fields blank or provide estimated value against the assets, income, liabilities to be disclosed in specified schedules. 
There are two important disclosure requirements in the return form under the schedules – “AL schedule” (applicable only where total income exceeds Rs 50 lakh) and “FA schedule” (applicable only for ordinarily residents having assets/ accounts, etc. situated in a foreign country). Extra care should be taken while reporting foreign assets/ accounts, etc. as non-reporting or incorrect reporting will attract a penalty of Rs 10 lakh and may also expose you to prosecution provisions under the Black Money (Undisclosed Foreign Income and Assets) and Imposition of Tax Act, 2015. While filing return of income this year, you should also remember to report the amount of cash deposited in bank account during the demonetization period (09.11.2016 to 30.12.2016) if the amount deposited is Rs 2 lakh or more in aggregate of all bank accounts. (The author is Director-Personal Tax, PwC India. Views are personal.)
https://www.financialexpress.com/money/filing-income-tax-return-10-common-mistakes-people-make-while-filing-return-of-income/746455/
CC-MAIN-2018-17
refinedweb
1,491
54.36
Archives ASP.NET MVC Grid View using MVCContrib In this post, I demonstrate how you can use the Grid UI helper of the MVCContrib project in your ASP.NET MVC application. MVCContrib is a community project that adds functionality to Microsoft’s ASP.NET MVC Framework and makes the framework easier to use. MVCContrib provides several UI helpers, and the Grid UI helper is one of them. The Grid helper provides functionality similar to the ASP.NET GridView control. The Grid component generates HTML tables for displaying data from a collection of Model objects, and it supports paging. The MVCContrib project can be downloaded from the project site. The following are the steps to get Grid to work: Step 1 Add a reference to the MvcContrib assembly (download available from the project site) Step 2 Add a namespace import for MvcContrib.UI.Html to your web.config file: <pages> <namespaces> <add namespace="MvcContrib.UI"/> <add namespace="MvcContrib.UI.Html"/> <add namespace="MvcContrib.UI.Html.Grid"/> <add namespace="MvcContrib"/> </namespaces> </pages> Using the Grid ASP.NET MVC Book List Below is a list of some of the upcoming books on ASP.NET MVC. ASP.NET MVC Preview 4 Released ASP. ASP.net MVC Vs ASP.net Web Form Software.
http://weblogs.asp.net/shijuvarghese/archive/2008/07
CC-MAIN-2015-22
refinedweb
202
61.43
Introduction: electric scooter conversion. As of right now, it has a solid 5 rating - I didn't know you could write that much on Instructables without telling anyone how to build anything and still receive perfect reviews. I). Step 1: R/C Hobby Parts: the Pros and Cons The focus of my previous instructable was on repurposing R/C hobby, namely model aircraft, parts for vehicle propulsion. It will also make up a substantial portion of this one. four times. In the size and weight of a 500 watt electric scooter motor (about 3 inches diameter, 4 inches long, and weighing about 4 pounds), the example motor shown at the bottom can handle 2 - 3000 watts (i.e. 3 to 4 horsepower) of power throughput, and up to 6,000W peak. The advent of mass produced, high performance lithium ion batteries is another factor. A modern lithium polymer (more on the distinction between different lithium flavors later!) battery the size of a U.S. house brick (about 1.3 liters) can store up to 300 watt hours, at the usual lithium polymer energy density of 240 Wh/L, and discharge at several kilowatts for a few minutes. No traditional lead-acid, or even nickel-chemistry battery system can come close to that. Granted, there are downsides to having such a dense energy source, and those will be addressed, but the truth is there in the numbers. Part of my engineering interest is taking advantage of these increased power densities to create ever more compact but still practical vehicles. For instance, only with brushless technology is the miniature hub motor I've been working on a possibility...much less electric inline skates using them . Electric scooters are still imagined by most people to be large, low-performance, lead-acid battery powered monstrosities that are reserved for kids with back yards too big to run around efficiently in. While this may be true for most commercially sold ones due to cost reasons, if you're building one yourself, there's no reason to not expect better. 
R/C aircraft components are the best power-to-price electric power systems, period . You'd notice I left the fine print "commonly available" up there, because there are things which are more hardcore than a cheap brushless motor in terms of power density. The problem is, you can't buy them. Not easily, and definitely not cheaply. However, because R/C aeromodelling has become so prolific, parts are readily available over the internet and extremely cheap. Most R/C parts these days are manufactured in China and other East Asian countries and sold by dealers directly from those areas. Even three years ago when I wrote my previous piece, this was not as true as it is now. The electric flight market, for the better part of the last decade, was dominated by high-market European manufacturers, and. Depending on your power needs, a motor that has enough wattage to propel a vehicle will run between $40 to $100. A controller that can run the motor will be around $50 to $150. Batteries are the big cost breaker for EVs, still, but typical model aircraft lithium polymer packs price out to between $0.40 to $0.70 per watt-hour . A scooter may have around 150 to 200 watt hours of battery onboard, so expect about $70-100 in commonly available batteries. As I will show in more detail, you can assembly a roughly 1500 watt electric power system for something like $200 to $300 - and that is for everything , even including batteries, and a charger if you're good at shopping. All of this might seem like cheap airplane parts are the way of the future. However, they also have their downsides. Unfortunately, R/C aircraft components are usually very rudimentary and somewhat fragile . Let's face it - airplanes are, in the purely mechanical sense, pretty easy loads to handle. 
The torque and power required increase directly with speed, so there's no static or locked-rotor (stall) conditions to worry about, unlike in vehicles where maximum torque is required at zero speed in order to accelerate from standstill. Airplanes are never supposed to hit things, bound over rocks and sidewalk cracks, or be jostled by suspension movements. What matters more in aeromodelling is light weight. As a result, model motors are usually made as lightweight as possible, using thin metals and plastics, and undersized bearings and shafts. Substantard metal alloys (such as soft architectural aluminum) are common in "cheap" motor construction. R/C power controllers are usually single-PCB affairs which pack power semiconductors and logic right next to eachother using the minimum amount of support components possible by design, and only capable of reaching their ratings when placed in a constant airflow (such as, you know, the draft of a propeller), and are rated to just under the peak power handling capabilities of the semiconductors. One of the major themes I'll hit on later is that you must derate everything . The ratings given for aircraft parts, especially cheap ones, are generally unrealistic for EV use. Even worse is the fact that the average aircraft lithium ion pack doesn't have a hard shell. Lipo batteries are little squishy bags of volatile electrolytes and active alkali metals. I don't know who's great idea it was to make a battery without a shell, but the abuse of a vehicle dictates that batteries must be properly mounted and proofed from shock, impacts, and weather. It makes me cringe a little even recommending using lithium batteries to the public because of those reasons. You've been warned. Overall, though, I think my point is clear: R/C model aircraft components are a good economical choice for those looking to begin experimenting with electric vehicle technology . 
It may not be the best solution for someone looking for a reliable, maintenance-free, long-life commuting vehicle. Many R/C components are definitely not manufactured to vehicle specifications and may fail or become finicky over a period of time. There are definitely commercial EV solutions available which are even plug-and-play - but I'm assuming if you are reading this, you have a little sense of adventure. Step 2: A Typical Brushless Outrunner The dominant type of motor found in the aeromodelling world today is called the outrunner motor. The first image shows literally the first result I got when I searched "outrunner" on Google Images, and is pretty damn representative of most of them. Mounting the Outrunner So the cool thing about outrunners if the name didn't give it away already is that the outer case of the motor is the part that spins . In the first image, only the faceplate of the motor - the part the wires exit through - is stationary, and that is the part which gets mounted to something solid. Otherwise, the entire rest of the shiny gold and silver bell spins. This tends to render the motor unsuitable for conventional "DC" mounting styles like clamp mounting or double-supported mounting. The idea of the outrunner in aeromodelling is that you can directly mount a propeller to that rotor bell end. However, for EV usage, mounting a sprocket, pulley, or wheel directly to the motor in this fashion is a bad idea . The reason is shown in the diagram in image 2. The bell shaft bearing is effectively cantilevered, which means a strong side load (like tensioning your chain or belt, or a wheel load) can bend yourwhole motor .. Reading the Outrunner Outrunners are typically given a numerical designation similar to AA-BB-C(Y/D) There's two overlapping, almost indistinguishable, and sort of conflicting systems about what the letters mean. First is the stator-referenced system. In this system: 1. The first number AA indicates the stator diameter in millimeters. 
This is the active component in a motor which generates all the torque, so this is akin to selling cars by engine displacement. 2. The second number BB indicates the stator length (stack height), or the length of the magnets. 3. The third number C may be a low number (single digits to 20s), indicating the number of wire turns per stator pole. If it is a high number (high tens to hundreds) it is the motor's "Kv" constant, or voltage constant in RPMs / V 4. An optional Y or D means the windings are terminated Y or Delta - for the same TURN COUNT, Y-terminated motors rotate slower and with more torque for the same current draw, but need a higher voltage to achieve said current draw. It's a design tradeoff, but the vast majority of R/C outrunners are Delta terminated for convenience. The second is the motor-referenced system, more common for inexpensive motors, in what I can only assume is a ploy to amplify the apparent size of the motor. 1. The first number AA now refers to the total diameter of the motor, in millimeters. 2. The second number BB is the total length of the motor case, from front to back, minus the shaft. The third and fourth numbers typically remain the same. ... So how do you tell which one is which? If its not explicitly given to you as stator diameter , it is probably the latter system. The most definitive way to tell is if you have both data points - stator and outer diameters. A list of typical stator-to-motor diameter cross correlations for typical vehicle-sized motors is given below: 42 to 45mm stator > 50 to 55mm motor case 50 to 52mm stator > 63 to 65mm motor case 68 to 70mm stator > 80 to 85mm motor case Sizing the Outrunner Most electric scooters will find a motor in the 60mm (motor) diameter class more than sufficient. A good seller will give at least two important specifications which you can use to determine rudimentary drivetrain parameters. 1. The Kv rating is how fast the motor will spin per applied volt. 
Conversely, it is how many volts the motor will generate across its terminals if you spin it . This is largely a remnant of the DC motor days when you could dump your motor on a battery and it will spin. Electronic controllers, such as BLDC controllers, can actually vary this parameter of the motor significantly, so the Kv is just a rule of thumb unless you are a motor engineer . You can use the Kv rating in RPM/V, your system voltage, your anticipated drive ratio from motor to wheel (x to 1), and wheel diameter (in inches). to calculate a theoretical top speed for the vehicle. This is a purely theoretical number in an ideal, frictionless world. The equation goes Speed (mph) = [( RPM/V * System voltage ) / (Gear ratio )] * (Diameter * pi ) * (60 minutes per hour / 12 inches per foot) / (5280 feet per mile) A cool little resource that does all this for you and even provides you with acceleration and battery figures is the Tentacle Torque and Amp Hour calculator , written for the combat robot community by the late Steve Judd, a long time Battlebots and robot combat competitor. The website is still maintained as a resource for robot builders. As expected, it's very "robot oriented", but to use it for vehicle calculations, just plug in your own motor statistics (or take a best guess), and use 0.5 motors per side if you have single motor drive. Note that "Average % of Peak Drain " should be turned down to 5% or 10% for EV usage - that is the amount of time you spend doing burnouts or launching from standstill. A database of cataloged motors exists for sanity checking. If you only have a KV rating, then the only thing you can estimate is the top speed. 2. The internal resistance of the motor, also known as winding resistance, terminal resistance, etc. It will generally be a low number (less than 1) ohms . Given this value and your system voltage, you can calculate the maximum current draw the system can theoretically see based on Ohm's Law, I = V / R . 
Real current draw will be less (but not much less) than this value due to the inherent resistance of copper wire, semiconductors, switch contacts, etc. But again, a ballpark figure. Additionally, as described in my just-build-your-own-damned-motor-already writeup , given the Kv of a motor in RPM / V, you can also find the torque produced per amp of current draw. RPM/V is not a SI unit, but a little math will get you to the SI definition of an electric motor's voltage constant, V / (rad/s) ; that is, volts per (radian per second). In short, the voltage constant in V / rad/s is also the torque constant in Nm / A, or newton-meters per ampere . If you're that inclined, Nm/A can be directly back-converted into ft-lb/A or in-oz/A, as they are all units of torque. Therefore, if you know the IR of the motor, and your system voltage, you can find a theoretical peak torque value for the system, which is useful for calculating maximum accelerations: Torque (Nm) = (Nm/A ) * (System Voltage / Motor Resistance ). This number is, indeed, very theoretical. I'll address special considerations for R/C motors near stall in a little while. More Sizing of the Outrunner Some times, you will also see a power rating - usually in the hundreds or thousands of watts. It's important to remember here that the value given will almost invariably be power input - that is, the power your battery is feeding into the motor. If you are familiar with DC motor principles, you know that the motor can only ever deliver 50% of this value back out as mechanical output power - torque times speed. (If you're not, read this ). And that's if it's a ideal motor - at this operating point, 50% or more of the input power is being dissipated as heat. Essentially, the "power rating" figure is not very helpful, since if the motor is operated at anywhere near half of the figure, it will quickly overheat. 
Ultimately, the way to size a motor by power is to roughly calculate your total drag force using the Drag Equation, and assuming Cd is about 1.0 (for a person standing up and moving forward), and multiply that by your desired cruising speed - in SI units, the result is the power the motor needs to output to keep you going at that speed. In other words, Pmotor in watts = (Drag Force in newtons * Cruising Velocity in meters per second). As a rule of thumb, this should be less than 15% of the maximum motor input power. Why not an Inrunner? An "inrunner" is the back-constructed word for a conventional brushless motor. In the aircraft domain, they are much less suited to vehicle propulsion because they spin significantly faster i.e. have very high Kv values. Subsequently, they require much more geardown to achieve the same torque levels. While inrunner drives are definitely possible, the added mechanical complexity is suboptimal. However, they're definitely easier to mount and less susceptible to getting dirt and road junk in the motor. Step 3: Buy an Outrunner! HobbyKing!! Anyone who recognized the motors in all the previous pictures will know exactly where they came from. In 2007, HK was just another random East Asian hobby store peddling shady equipment, and they were not exactly known for quality. Now, HobbyKing has pretty much taken over the entire modelling universe. For ease of reference, I've been using the term "ICBM" to refer to Hobbyking motors - not ICBM as the military uses the word, but I nexpensive C hinese B rushless M otor. Yeah, you can tell people your scooter is ICBM powered. They have several house brands for their equipment, of which Turnigy is just one. Last time I checked, they have 42 pages of outrunners. Now, most of those are still very small, but the sheer volume makes finding good motor candidates hard. This is why I have chosen to point out specific candidates which I have used, seen used, or recommended to people. 
If you want to hunt there on your own, I would stick to the "Large Motors " category - since the rest range in size from fingernail sized to something like a small guava. Update July 2012: As of May or June of 2012, most of these large motors are no longer sold by Hobbyking. Maybe they figured out that nobody actually uses them for planes! The information below should therefore be used strictly as reference for selecting motors from other vendors as a result. The same motors are still sold under the "EMP" brand by places such as Leader Hobby and other smaller overseas retailers. - An example of the "EMP" 63/74 motor - An example of the "EMP" 80/100 motor. Hobbyking's replacement for these motors seems to be their own in-house designed "SK3" series. The SK3 63xx motors are very much equivalent to the older type, but they are actually slightly smaller in dimensions with the 63xx series being actually 59mm diameter. They also have slightly higher internal resistance (but still very low - we're talking like 40 milliohms over 30 and similar scales). When selecting a motor, remember that ones with skirt bearings (explained below) tend to be more durable than ones without. End Update 1. The Turnigy 60mm class motor (63-54 through 63-74) Example Shown in image 1, these motors are good to about 1 horsepower output, which is more than reasonable. They're about 2.5" in diameter and have a 10mm shaft. These and similar 60mm class motors have been used to great success with 6" pneumatic tires in a scooter-building class section hosted at MIT, and on many e-scooter conversions like Jerome's and the EVPlasmaman scooter.. They have a modest Kv (200 to 280) and peak current draws of 50 to 60 amps. The Turnigy 60mm motors would make for a more than reasonable cruising vehicle with a very low profile, and is the motor I'd recommend to anyone who thinks of asking me what motor they need (because clearly you don't need any bigger). 
The reason I picked that motor line over the other HK lines is their use of askirt bearing / can bearing, which makes the motor more durable overall. Historically, these motors have been known to ditch magnets while at speed, resulting in pretty instant destruction. The addition of the can bearing in the latest iteration eliminates the high speed vibration that could break magnets free. The 63-64 is the motor that is behind Fanscooter . 2. The Turnigy 80mm class motor (80-85 and 80-100). Example This is my choice of weaponry for attacking the open road. These motors are almost absurdly powerful and torquey, and can output up to 3 horsepower. These would be better suited to a serious moped , road-going motor scooter, or full fledged electric bicycle conversion. They are easily capable of drawing over 100 amps, so a substantial controller will be needed - no current "cheap" model airplane controller can actually handle them reliable due to the very low resistance of the motor and subsequent high current draw That said, my Melon Scooter (so named because of the melon-sized motor) runs one of these on 40 volts, and is very conservatively geared to reach 25 miles per hour in just a few seconds. The gearing is more to save the controller and my life than anything - power conservation shows a top speed of more like 50mph. The 80-100 comes in two Kv flavors - 130 and 180 RPM/V. 3. The Turnigy Are You Serious, Hobbyking?120mm class motor I'm only including this for novelty sakes, because I have yet to conceive of a vehicle short of an e-bike or e-trike that can hit U.S. highway speeds that can use this motor to full potential. Maybe two of them on a real electric motorcycle? Seriously, what do you fly with those things? Why don't you just get in the airplane and fly it yourself at that point? This motor will need a legitimate EV traction controller for vehicular purposes. 
The highest current model aircraft controllers by Castle Creations, JETI, and other "legit" companies, in the 48 volt and 300 amp range , cost close to (or more than) that of a real vehicle controller anyway, so making the jump is a reasonable thing to do. And the EV controllers offer more specialized features that... you know, a vehicle would find useful, such as variable regenerative braking, torque control (close to the dynamics of a gas engined vehicle), and accessory lighting integration. Controller choices are a forthcoming topic, so stay tuned. AXi, Hacker, and other "Legit" Brands Before Hobbyking et. al. unleashed a sea of ICBM s upon the world, large outrunners were exclusively the domain of European manufacturers. I'm not going to talk too much about them, since if you already have one of them you'd know their power capabilities, but the AXi 5300 series and Hacker A50 and A60 are motors in the same power class as the HK 60mm motors described above, and would work just as well in the application. Shop around Those three are definitely not the only possible choices by far, but they were included because I have a pretty long history with them and know them well, among other reasons mentioned above. Hobbyking is also not the only dealer of BLDC motors, but they, in accordance with my justification at the beginning, seem to have the best price to performance ratio so far. Step 4: Controlling the Outrunner: Sensorless or Sensored? Unlike a regular old DC motor, brushless motors require an electronic controller to commutate the motor. The methods by which they do this differ, but it commonly boils down to whether or not the motor has Hall Effect sensors placed strategically in the windings to sense the magnet rotor's position so the controller knows which coils to switch. Other strategies include position encoder wheels, but the Hall sensor is by far the most prevalent in small motors. 
The vast majority of model motors are sensorless That's the first rule to keep in mind. Because the load experienced by an aircraft increases predictably with speed, and is zero at zero speed, pretty much all aircraft motors (especially outrunners) are just spinning rings of magnets on a stick. That is why they can be made so cheaply. Industrial motors for general motion control like servo systems and robots and whatnot need to deal with constantly varying and transient loads, so they therefore have some kind of feedback built in already. Usually, the only model motors that come with sensors built in are small R/C car and truck motors like this . Sensored control, the fundamental method In a sensored motor (and associated controller), there are generally three sensors positioned at 120 or 60 electrical degrees apart in the motor that output a 3-bit Gray-like position code. The placement of these sensors depends on the winding configuration, number of slots, and number of magnet poles. For more information on how and why this works, see my hub motor instructable . The advantage of sensored commutation is that the controller always knows where the motor is. Therefore, sensored motors usually have low-speed and stall characteristics more like a classic DC motor. Sensored commutation has its downsides - sensor failure or glitchiness could mean the motor controller stops running the motor. Most inexpensive industrial and commercial controllers, even EV controllers, are just governed by a state table whose input is the 3 Hall sensors, and so sensor failure would mean an erroneous output and non-running motor. There is also the issue of finding the correct combination of Hall sensor leads and motor phase outputs - up to 12 possible ways of matching them together, if your motor doesn't come with a controller. 
Finicky details aside, sensored commutation is the way to achieve consistent stall- and low-speed behavior, and "real" vehicle controllers are usually sensor-commutated. Sensorless control, i.e. R/C speed controllers A sensorless controller has to detect the position of the rotor through some other means. The most common method is to randomly bump the motor (drive two phases) and observe the back-EMF, or generated voltage, profile on the third phase. The slope of the BEMF dictates which direction the motor moved in, and therefore which coils to switch next. Sensorless commutation uses more sophisticated position state estimators which have to have some speed feedback to "pick up". In other words, sensorless motors can't technically move without being already moving. This clearly poses a quandary for vehicles and other inertial loads. If the initial "bump" is not strong enough, the motor will not move enough to generate a meaningful voltage pulse. For viscous loads like a propeller (load proportional to speed), this is not a problem at all. Therefore, the vast majority of R/C controllers are sensorless. Sensored ones do exist - again, for cars and trucks, which as you might guess, are just like tiny versions of EVs you personally ride, an inertial load. Sensorless controls for cars and trucks are also commonplace, since modern motors are powerful enough that a pulse on the phases is enough to move a small model. The biggest implication of sensorless control is the dubious availability of stall torque. This makes the torque equation presented two pages ago a little misleading. You will generally not be able to stand on a vehicle and challenge someone to a drag race. Sensorless vehicles, especially those using R/C parts, will need to be "kicked off" or push started . Sensor Augmentation The only real difference between a sensored motor an a sensorless motor is... sensors . 
You can actually take any common R/C outrunner and add Hall Effect sensors to use them with EV traction controllers. Numerous ways exist to append sensor feedback onto your motor: I cover installing sensors within the windings (inside the motor) on my hub motor Instructable; and here are twoways people have used externally mounted sensors. You would end up with an additional 5 wire harness coming out of the motor, comprising three Hall sensor outputs, logic power, and logic ground. Hall sensors are typically "open collector" i.e. they can only sink current. Controllers have internal pull-up resistors built in, so there's generally no need to internally pull the Hall sensor output to the logic rail. However, power supply decoupling capacitors placed right at the Hall sensor leads seem to be helpful. The Hall cable should not be routed parallel or immediately next to the phase conductors, since the high switching currents in the phase wires can cause induced noise problems with the sensor cable. Update June 2013: Outrunner Hall Sensor appendages I hate to plug myself usually, but in the past few months, I've successfully developed and am currently selling a line of Hall Sensor Boards and Hall Sensor Mounts sized to several of the typical outrunner suspects. Placed externally, they allow a sensored-only motor controller to drive these motors. See the boards and mounts on Equals Zero Designs. I designed these specifically as a "stock solution" for everyone asking me how to add sensors to your outrunners! Step 5: Special Mechanical Considerations for R/C Sensorless Control Half of the point of this guide is me telling you to use R/C equipment in unintended ways, for better or worse. So I've compiled a list of guidelines that I have found to be helpful. These encompass both design and operation, so it's more of a state of mind to be in when you put together a vehicle using R/C components. 
The more electrical cycles your controller sees, the faster it will lock in AC motors (and brushless motors are technically AC - the details are inconsequential here) can have different ratios of electrical speed to mechanical speed. In the most basic 3 phase motor where there are 2 magnet poles and 3 stator slots (three coils, in other words), the electrical rotation speed is the same as the mechanical rotation speed. As the number of magnet pole pairs increases, so does the ratio of electrical speed to mechanical speed - in fact, one to one. A good animation of this principle can be found at the LRK site (the guys who made high pole count outrunners popular in the aeromodelling world) It's important to note that a high pole count motor does not necessarily have more torque, but to a limit, the addition of slots and poles divides down the Kv value of the motor (and increased the torque produced per amp such that they no longer needed gearboxes to drive propellers. The angle the motor has to move between magnet-stator pole alignments is lesser, so it needs to be switched often to accomplish the same displacement. However, because sensorless controllers depend on having a regular motor voltage waveform to "lock on" to, this can be advantageous. Gearing down the motor does two things. One of them, as stated, is the fact that the controller has to switch more in order to accomplish the same displacement. When coupled with vehicle mass and the motor's own rotational inertia, it tends to result in a smoother BEMF profile. The header image is a picture of what a motor terminal voltage profile actually looks like when it's running. The second effect is that... Speed reduction also divides your inertia. Remember when I said that aircraft propellers aren't inertial loads i.e. need nonzero torque to begin accelerating? To approximate that in a vehicle, you need to reduce your inertia as seen by the motor as much as possible. 
It can be shown that the apparent mass of a load as seen by the motor goes down as a factor of 1/ R^2 , where R is the speed reduction ratio (i.e. R to 1). You can already see that if you gear down a motor even around 8 or 10:1, you pretty much become nonexistent as far as it's concerned. Therefore, sensorless drivetrains benefit from very high geardown ratios It's important to note that no matter how far you gear your motor down, some amount of push starting is needed . It may be as little as the action of jumping on the vehicle that scoots it forward slightly, but with the possible exception of absurd limiting cases, you won't be able to just stand on it and hit the throttle. It may work, but the start will be unstable and jerky (a phenomenon in the R/C world known as "cogging") and causes high currents to be drawn. Extended attempts to launch from standstill can therefore cause quick ESC overheating and failure. So that means... Don't floor it. ...unless the vehicle is already moving. Sensorless operation at low speed usually requires a steady finger and can be unstable. The controller may lose sync with the motor, which just means it switches the wrong phases at the wrong time. Sync loss can also occur if you're moving slowly and then suddenly increase the throttle command. Most controllers will assume the motor can spin up at some minimum angular acceleration - which, with your load on it, may not be true. This also results in sync loss. Summary Sensorless R/C controllers can result in a vehicle with unexpected (or just underwhelming) standstill and low-speed performance. Gear down a fast motor with a high ratio can alleviate this issue, so keep it as a design consideration when running drivetrain calculations. 
Step 6: R/C Controller Examples and Usage General Guidelines for Picking Generic ESCs High voltage specification and Battery Elimination Circuit The cutoff for when a controller is considered "High Voltage" seems to be around 6S lithium batteries, or around 24 volts. Below this voltage, controllers often come with linear regulated power supplies built into them which can power (in their intended application) a receiver and servo motors, called a BEC. This is handy for when you need to convert a throttle signal (more on that later), but the logic power supply is often not enough for things like lights. However, linear supplies work by burning off excess voltage - therefore they tend to run hot and the power dissipation is unacceptable for voltages much above 20 volts for most 5 and 3 volt regulators. Therefore, some controllers will advertise a "SBEC" or "UBEC", both designations for a switching regulator built-in. These types are usually more expensive for the same amp count. Controllers designed for use at above 6S ltihium cells (8S-12S) are usually advertised specifically as High Voltage. These usually do not come with a BEC and the signal wire only exists to take inputs. It is generally not recommended to operate a R/C controller anywhere near its maximum rated voltage. Due to cost cutting, components with breakdown and failure voltages just barely above the advertise threshold are often used - 25 volt capacitors and 30 volt power semiductors in a controller rated for 6S (22.2v nominal, 25v peak) are common. Power surges resulting from motor speed change can exceed those limits and cause rapid (fiery, smoky) controller failure. For EV systems, a HV type controller is most likely necessary unless you are intending to run an 18 volt system (5S lithium batteries or 15 nickel cells), which is actually just fine and workable - but less power than I would like. Speaking of "rated", it's important that you... Derate everything. No, seriously, everything . 
A controller the size of a credit card will not actually flow 100 amps all day at 36 volts. The specifications given for aircraft controllers are generally a continuous operating current at maximum temperature of the components assuming a constant high speed airflow is present and there are no surges . In aircraft, this is expected since propellers and fans create high speed exhaust streams, and throttle is generally constant and unvarying during cruise. In a vehicle... not so much. In fact, the opposite is true. You might be tempted to stuff the controller into the frame, this eliminating the airflow cooling potential. And a vehicle is pretty much entirely a surge load. Most industrial controllers achieve their ratings using a continuous thermal equilibrium method too, but with no assumptions of cooling. Surge ratings are often a function of maximum acceptable semiconductor or circuit board trace temperature rise, or possibly a maximum voltage ripple specification. Things aren't that strict in the modelling world. Given no airflow (convection-only cooling), it's wise to derate the "continuous" amps by at least 3 to 5 times and the maximum current, usually given as no more than a few amps above the "continuous" rating, by 25% to 50% (i.e. reducing 100 amps to 75 or 50). If you can build in active cooling, such as a fan or waterblock, you are much better off. However, I'd still derate continuous amp draw by 40% or more (i.e. a 100A controller is a 50-60A controller). Peak demand on acceleration can of course exceed this maximum, and the more frequent the excursion the closer to the lower end of the derate is necessary. Frequent start-stop driving in a city environment would warrant more derating, cruising along a trail or roadside less. It depends on the application, but current capacity must be derated as part of design. Soft-Start and other special features are useless Many ESCs come advertised with a 'soft start' feature. 
What that does is reduce the amount of current the controller delivers to the motor upon startup, such that your airplane doesn't start spinning instead of the propeller. However, we're not driving propellers with this, and therefore all the soft start does is make the vehicle more likely to cog and rattle on startup. Lower current means lower force to overcome inertia. You must check the calibration settings to make sure it is turned off. Another common feature is speed governor mode, commonly used for R/C helicopters which keep the rotor speed constant but vary thrust by changing the blade pitch. Speed Governer mode sets an internal control loop that links motor speed with throttle position. While this may sound like what an ESC should be doing in the first place, the default most is in fact open loop - if you set a certain throttle, and then grab the motor and apply load, the controller does not try to compensate for the extra load, and the motor slows down. With governor mode on, the controller actively ramps up the power applied to the motor to try and keep the speed constant. I imagine this would function kind of like eternal cruise control on a vehicle, but I have never experimented with Governor mode on my own. Sudden changes in speed command with a closed loop control are liable to cause current surges no matter if the command difference is large or small, in my opinion, and it should be avoided. Timing is important While the technical explanation is mostly out of the scope of this Instructable, you will also see things like 7.5, 15, 22.5, 30 degree, etc. adjustable timing on the controllers. This part is important: timing refers to how soon the controller switches to the next motor state in relation to the magnet rotor position. This affects the current draw and torque production of the motor at high speeds. 
The simple technical reason is that winding inductance causes slow current switching in the motor coils, and at high speeds, the stator magnetic field generated by the windings falls behind the magnet rotor position, causing reduced torque and wasted power since all of it isn't going to producing torque any more. For outrunner motors especially, high timing is crucial. The massive iron cores in most outrunners have lots of inductance, and on most product pages they will even warn you to use "high timing" for outrunners. By the way, adjusting this timing advance on-the-fly using current and rotor position feedback such that the motor always produces maximum torque is called field oriented control or vector drive . If it looks alike, it's probably alike. Most "cheap" Chinese ESCs descend from a few original bloodlines and are knockoffs of eachother. It's some times amusing to watch emergent strains that combine features of other controllers into one, and Hobbyking is a veritable tropical rainforest of a controller ecosystem. Since most of these controllers are made by a few houses and just colored differently depending on the contractor or distributor, one behaves much like another for non-HV types. The difference between one LV 80 amp controller and another on Hobbyking are probably minimal to literally none. HV designs are more varied, however, so this guideline doesn't really apply for them. Controller Examples So now I will introduce some of the common aeromodelling controllers I have seen or have personally used in vehicle builds. Most manufacturers and distributors will explicitly warn you against using their controllers on vehicles, since as mentioned before, vehicle loads and propeller loads are vastly different and the software written for one is suboptimal for use on the other. So I guess I should really say that you should not use airplane controllers on any kind of passenger-carrying ground vehicle. ... 
Now then, onto my personal favorite for these kind of things, brought to you once more by Hobbking: Turnigy Sentilon 100A HV Pictured in the lead image for this page, this controller has some of the best low-speed stability (lack of cogging) I have seen. It seems to be a Hobbyking exclusive version of a HobbyWing (watch those letters) OEM design , which seems to be a possibly grey market evolution of the Jeti Spin Opto 99 . For an airplane controller, it seems to be rather primitive and limited in configuration options; but this is what makes it well-suited to being a vehicle controller. This is the controller I have run in melon scooter and several of my hub motor scooters in the past. This thing can actually sustain 90 to 100 amps in strong airflow (having been tested on a ducted fan experiment of mine), but in vehicle usage with convection cooling I would try to make cruise current no more than 30 amps and keep peak currents under 100 amps. The second image shows what happens when you try to pass 140 amps through it for 5 seconds. However, otherwise, at a dollar per amp, it's quite economical for powering vehicles in the 1 to 2kW range. Castle Creations Phoenix HV 110 Castle is a long-time manufacturer of model controllers whose products have also seen extensive usage in small EV conversion. The Phoenix series has the reputation within the electric bicycle community of starting and running just about anything - meaning its estimation algorithm is robust and can handle faulty or poorly wound motors. I haven't personally used the Phoenix line since they are much more expensive, but Jerome's scooter uses a Phoenix 60 with a 50mm type outrunner. Being a reputable brand in the R/C community, I would expect their ratings to be closer to the truth. Larger Sensorless Controllers There are indeed much larger sensorless controllers, up to 200 and 300 amps capacity. 
However, while I will list them, I will not recommend them - this amount of power should be handled by a real vehicle controller, which can pay attention to how much current it is passing (R/C controllers do not actually measure and control current draw), and have more robust designs, more failsafes, and vehicle-related features. The "legit" brands are getting close to, or exceeding, the cost of a vehicle controller for the same number of amps anyway! The Turnigy Monster 2000 seems to have had a first-generation spontaneous combustion problem, but more recent reviews are positive. The JETI Spin 200 and 300 are some of the best model ESCs available, and are ungodly expensive as a result. If you want even more extravagance, Schulze is like the Armani Exchange or something of the R/C world, and they have 300 to 400 amp designs . Their controllers also feature options like full datalogging and sensored commutation (but if you want sensors, there are better options... read on!) Update November 2013: I've added some more controllers to this list; ones that I've used, have, examined, or otherwise have messed with. Trackstar 200A 8S 1/5 Scale This humongous thing is the subject of a full teardown on my website. It has ultra-low resistance, a ground-vehicle optimized starting routine, and active fan cooling. Based on the semiconductor count and thermal design, I think this can push 70-80 amps in a vehicle application, and definitely nameplate for acceleration. I'll have more to say about this once I have it on one of my own devices, but for 50 cents per (nominal) amp with actual robust construction, it's most likely legit. dLux 160A HV This line from Hobbyking seems to be a derivative of the Castle series below. They have a double sided heat sink (and the FETs on both sides to use them). I paired one of these with a 50-60 type outrunner, and two of that combo push around my go-kart BurnoutChibi with no problems. 
Based on examining their power devices and board layout, I think these are suitable for 25-30 amp continuous current draws in a vehicle application, with short bursts up to nameplate. There are "shorter" versions of it with 120 and 80 amp nameplate ratings, but I'd say the overhead is worthwhile since this is an air ESC and the starting algorithm is basic. Generic "200A" Suppo-type controllers These are marketed under various brand names with slight variations - I took apart no less than 4 of them in the same post that I investigate the Trackstar 200A. I both love and hate these, because there's some things about them that are nice but most things about them are disappointing. The nice things: - They are extremely simple and basic programming and intelligence-wise - The number of devices in parallel make for good burst and acceleration current overhead, from sheer thermal mass - The three stacked layers of FETs mean that there is extremely poor cooling no matter how hard you fan it - only the top layer receives significant cooling. You'd need to unshell the controller and fan it directly, and even then the center "core" is likely to remain hot. - The heat sink often is installed incorrectly and might cause phase-to-phase shorts as they contact the output wires - They're low voltage, relying on paralleling many shady or off-brand semiconductors to achieve the current rating, and the switching time is long, so they switch inefficiently. Types to watch out for I only have two data points as of right now based on personal experience for controllers to absolutely avoid: The "K-force" series from Hobbyking and the "Super Brain" are all very smart (read: specialized) controllers which implicitly perform soft-starting and locked rotor detection. They will treat almost all low speed loading as a stall condition and shut down. As outlined above, controllers that are too smart aren't well suited to running vehicles. 
Step 7: R/C Controller Communication and Signalling Because R/C controllers take as an input the hobby industry "standard" servo pulse, you cannot just communicate with it using a 3-wire potentiometer. A signal conversion must take place, and it can either be done in hardware or in software. The lead image is a graphic of a typical "servo command" pulse-width pulse: 1500 microseconds is a neutral or zero speed signal, 1000 microseconds usually full power one way, and 2000 full power the other. While servos and robot controllers respond to the pulse durations as indicated, a typical airplane controller is unidirectional. The most common "default" is 1000uS as zero speed and 2000uS as full speed, or something resembling that range. Of course, the ESCs can be recalibrated to accept different endpoints at will, so this distinction is mostly inconsequential. A Simple "Servo Tester" is Your Friend A vehicle throttle is basically a knob you turn to generate a signal to something which controls how fast the vehicle goes. In gas engined vehicles, this knob might literally have pulled on the engine's throttle body, but generally these days it's an electronic signal to the engine computer. In the hobby world, there exists a small device that is sold commercially which does precisely the task of converting a knob motion to "servo" pulses, and it's called a servo tester . An example is shown in picture 2; this one in particular . They are used in lieu of setting up a whole radio and receiver just to make sure your servos work. Notice that one (and most cheap ones sold today) have three outputs - this lets you test all 3 servos in a helicopter main rotor simultaneously. You only need to use one. The cool part about most servo testers is that you can directly drop a 0-5v source, such as a 3-wire potentiometer or commercial vehicle throttle, into the place of the knob (which itself is a potentiometer). 
The 3rd image shows a conversion I have done to that particular servo tester model - the 3 pin connector goes to a commercial hand throttle (more on those parts in a bit). After this stage is complete, you have a direct analog voltage to servo pulse converter that can be installed in a vehicle. Note that these come with different modes available such as "neutral", useful for keeping your servo centered while you adjust linkages, and "sweep", which automatically does a full 1000-2000uS sweep. These modes are not useful, and if accidentally selected during operation, can result in bad . It is recommend to remove the button which selects the modes. Full hardware method If you love torturing yourself, you can build a 100% hardware (i.e. resistors, linear integrated circuits, caps) timing circuit that will convert a 0-5v reference to the appropriate servo pulse. I link to a few in the older edition, but these days, you have no excuse to use a full hardware servo tester given that they cost like 5 cents. Hardware component values change with temperature and even humidity, and there are more parts, leaving them more prone to failure. Throttle ramping Because sensorless R/C controllers can't really control how much current they send to the motor (and current is directly proportional to torque output), and low-speed starting can be erratic and hard to control, it is useful to have a "ramp" somewhere in the throttle chain. Without ramping or some other kind of control input damping, sudden jerk motions of your hand or foot can result in the vehicle responding unexpectedly such as a sudden application of power. This is not only hard to operate, but it can be outright dangerous if you are in traffic or around other people. Ramping can be accomplished either in hardware or in software. Hardware resistor-capacitor filter In the fourth image is a "RC filter" type circuit, where RC is resistor-capacitor , which smooths out sharp transients in the control input. 
The general form of the circuit is known as a low pass filter, and its technical details will not be discussed (Wikipedia can provide a better treatment than I ever can). The values of your choice of R and C determine the damping "time constant", and that's just given by Trise = R * C . No, really, that's it. So in the example, a 100K resistor and a 10uF capacitor together form a filter which has a rise time of 1 second - meaning if you floor it from standstill (like you shouldn't be doing anyway, right? ) the control signal takes 1 second to reach 63% (the defined standard threshold for this circuit). True settling time is defined as the time it takes for the output to reach over 90% of the final value, and is generally accepted to be 3 time constants (3 * T). You may adjust the R and C values to achieve however fast a filter you want. It is important that the resistor is of significantly higher (at least 10 times) value than the throttle potentiometer, and should be at least 10K ohms. The reason is that if the resistor is too close to the pot's total resistance, the potentiometer affects the filter's time constant strongly. I put 100K as an example, but with most vehicle throttles being 5K ohm resistors, try to use at least a 47K resistor. The capacitor can be any polarized or nonpolarized type, but if it's polarized, please put it in straight. The whole circuit can be soldered inline with a throttle harness. The second variant of this circuit adds a one-way bypass to the system such that throttling down is unramped - meaning if you let go from full throttle, it doesn't take a full second for the throttle signal to slowly come back down, it will do so almost instantly . The diode conducts if the input voltage on the potentiometer side is lower than the capacitor's voltage (which is the output), so it effectively shunts input to output for those positions. 
In software using a microcontroller Ramping is also achievable in software if you're making your own servo tester-like device anyway. For instance, the following Arduino code implements ramping by taking a R/C pulse in on one digital pin and spitting it back out on another. This was written by Arduino and motor control master Shane Colton . #include <Servo.h> #define THR_MIN 178 #define THR_MAX 870 #define PWM_MIN 1000 #define PWM_MAX 2000 #define SPEED_LIMIT 0.33 // fraction of full speed #define RAMP_LIMIT 0.13 // fraction of full speed per second Servo tgypwm; float pwm_f = (float) PWM_MIN; void setup() { pinMode(9, OUTPUT); tgypwm.attach(9); /*tgypwm.writeMicroseconds(PWM_MAX); delay(5000); */ tgypwm.writeMicroseconds(PWM_MIN); delay(5000); Serial.begin(9600); } void loop() { signed int thr = THR_MIN; float thr_f = (float) thr; float pwm_target = (float) PWM_MIN; unsigned int pwm_i = PWM_MIN;; thr = analogRead(7); thr_f = (float)(thr - THR_MIN) / (float)(THR_MAX - THR_MIN); pwm_target = thr_f * SPEED_LIMIT * (float)(PWM_MAX - PWM_MIN) + (float) PWM_MIN; if(pwm_target > pwm_f) { pwm_f += RAMP_LIMIT * (float)(PWM_MAX - PWM_MIN) * 0.05; } else { pwm_f = pwm_target; } pwm_i = (unsigned int) pwm_f; tgypwm.writeMicroseconds(pwm_i); Serial.println(pwm_f); delay(50); } Step 8: How About a Real Vehicle Controller? But there are such things in the world as inexpensive brushless motor controllers which aren't rudimentary and lightly built. They don't need a weird throttle converter, instead being able to use a 3-wire voltage source directly as the command. They tend to have real current (torque) control, variable electronic braking, and some times come with useful accessories such as a main relay driver or light controllers. It's like they were built for vehicles or something. Luckily, in the past two or three years, specialized small EV controllers have become very common and also inexpensive. Here's some general characteristics about them. 
They are sensored controllers: Positive starting from zero throttle and no desynchronization from control inputs too fast to follow. The downside is that you'll have to add sensors to your motor if it doesn't have them already. Current control and regenerative braking: A brushless motor's inverter circuit is implicitly capable of performing regenerative braking - where kinetic energy is captured from the motor and returned to the battery. There is nothing magic about regen, despite it being a marketing tagline for all sorts of consumer electric accessories (and cars). Regen is not available on most R/C controllers just out of cost reasons and no real necessity. While regen can only capture back a few % of the energy contained in a moving vehicle, it can contribute to longer brake life and less necessity to use mechanical braking to make a small speed adjustment. Current control emulates the dynamics of a gas-powered vehicle better by keeping the acceleration constant. It also means the motor is limited from producing too much torque, which is handy for large brushless motors with very high torque capability. Multiple inputs and outputs, including reverse: Typically, an EV controller has at least two analog inputs: one for a throttle pedal or knob, and the other for a variable electric brake. It might also have inputs for throttle and brake switches which are tripped as soon as you step on either, and can signal to the controller that you indeed want to either go or stop (a single analog input cannot distinguish between you flooring it and a loose wire, possibly leading to dangerous situations). The brake switch would signal the controller to stop driving the motor because you want to stop instead. Maybe that's bad for doing burnouts, but it's not good to fight your brakes with your engine or motor anyway. Other switches and doodads include key switch inputs, reverse inputs, status light outputs, datalogging, etc.
Better built: Because vehicle controllers have to be more rugged, they are generally built inside sturdy metal cases which also act as heat sinks for the power semiconductors. Common small EV controller examples: Like quite a few other situations in life, a Chinese company offers a solution for the entry-level and basic functionality seeker where American and European manufacturers could (or actually would) not. The undoubted champion of inexpensive small EV controllers is Kelly Controller. Back in 2007 and 2008, they were really shady... but things have improved a lot there, apparently. Kelly's KBS line of brushless controllers has everything I just described up there and the typical price is about 2 dollars per amp... real amps. Not barely-on-the-edge-of-destruction amps. Kelly KBS Series: These things are about the size of a 12oz drink can (except square) and the chunkiest model can process up to 7Kw (72v, 100A) peak and 3-4 kW average. The one I've had the pleasure of dealing with the most is the KBS36051, which is a 20/50 amp type. I've found that 20 amps is a little sluggish for most vehicles, but that doesn't mean you can't make fun go-karts using them. If you want more power, the KBL type goes up to 500-600 amps and 120+ volts. Now that's getting into electric car domain, so I'm actually not going to recommend them to small EV builders. They are also legitimately the size of a house brick or more. Kelly controllers can be extensively calibrated using the shady Visual Basic application they come with and a USB-to-Serial dongle. Shady Electric Bike Controllers: These sometimes pop up on eBay for $50 to $200. They function kind of like Kelly controllers, but are more rudimentary, not calibration-enabled, and were often produced for a specific model of electric bike sold in Asia. They come in fixed voltage and current ratings (and are internally current limited to that fixed amount).
I have no specific product page link for these things, and the actual wiring harness changes depending on the model you get. They're made in nameless factories throughout southern China and have no brand name attached. There are definitely several "bloodlines" of them, and they might even warrant an entire Instructable on what the hell they actually are. I can only recommend going to eBay and searching "brushless e-bike controller " to see what I mean. Because of the indeterminate wiring conventions, I can't recommend these to complete beginners, but if you have been hacking around with EVs for a while, they are great starting foundations for modding controllers. Speaking of which... Community-modified Shady Electric Bike Controllers There is a large community of electric bicycle builders who have spent some time reverse engineering those controllers and are now selling upgraded versions of them as kits or whole modules. The largest concentration of these users are at the Endless Sphere Technology forums. *NEW*January 2012Sensorless E-bike controllers, a.k.a "Jasontroller" As I recently discovered, there are several nameless e-bike controllers which can run fully sensorless. They are my current recommended small EV control solution because their sensorless startup and run algorithm is optimized for vehicles, which are inertial loads (as covered before). Instead of forcing the motor to start by dead-reckoning a minimum frequency, which would mean jerking your vehicle to several mph, these controllers ramp up to speed in startup mode. While the "ramping" takes less than a second, the effect is very pronounced and different from R/C controllers - the launch is smooth and generally seamless and without "cogging" or pole slipping. Being generic e-bike controllers, they also feature current limiting and a variety of different brake inputs. The only downside so far is that they do not perform regenerative braking...and they're kind of big. 
As the only place I have found which retails these controllers is eBay, I can only link to confirmed sellers and general search terms. They are usually advertised directly as "Sensorless", or in some amusing roundabout way as "Without Hall". Currently, bobzhangxu is the only seller which clearly advertises some of the controllers in his store as sensorless capable. The controllers I have investigated personally are retailed by him. I have performed a full teardown and inspection of a "350W" type in this site post (and the next several posts show integration into some of my vehicles, including the hub motored RazEr and the titular Melonscooter). I like these things a lot now, since fiddling with proper Hall sensor timing is both critical and very much a pain unless you have an adjustable sensor mount. The notable things about them are: - They're sensorless. Not like crappy R/C airplane sensorless, but actually optimized with ramping functions for inertial loads. - Cycle-by-cycle current limiting, so they are fairly robust. Like their sensor-only brethren, this current limit can be violated and messed with in multiple ways, including bypassing the current-sense shunt or changing the MOSFETs out. - They are fairly large. Even the 350W is the size of an average single-serving juice carton or so, much bigger by volume than an R/C controller, but still smaller than a Kelly. - They're not that power-dense or robustly designed. Low cost means cheap components and modest over-rating ability. The circuitry is fairly generic, the gate drive is discrete and made of really cheap small transistors, and almost everything is thru-hole or large surface mount packaged. Being sensorless and not particularly high performance, they like motors with a reasonable winding resistance and inductance - something R/C motors are not known for.
I've killed some by accidentally powering the motor with the drivetrain locked or restrained, and the resulting massive fast current spikes overcome the in-built protection and seem to destroy the gate drive circuitry. They seem to have some trouble starting up large airplane motors such as the melon (as tested on melonscooter), so I would recommend them on the 60mm type motors at most. Else, you may elect to rewind your motor to a reasonable BEMF constant. They're designed with bike wheel hub motors in mind, which have many poles, are slow, and usually high resistance and inductance - a case proven by testing these on RazEr. Finally, they have a commutation frequency limit. This is a point where the ESC can no longer keep up with the motor, and as a result could lose synchronization and draw huge current spikes. While airplane ESCs, with their simpler commutation dead-reckoning scheme, can usually commutate up to 3000+ electrical Hz (maximum speed of switching the phase outputs from one motor state to another - this has nothing to do with the PWM frequency), the Jasontrollers seem to be limited to only 500 electrical Hz or so. To convert eHz to mechanical RPM, divide by the motor magnet pole pair count, then multiply by 60. This implies that for an average 14 pole (7 pole pair) outrunner, the highest attainable motor speed should be no more than about 4,300 RPM (500 ÷ 7 × 60). Again, this implies that they were built to drive bike hub motors. If you want to utilize these controllers effectively, you should make sure your motor doesn't regularly go over that speed, which with a fast and high Kv airplane motor, could prove difficult. *New!* June 2013: "Miniature" e-bike controllers: These controllers seem to be in the same family as the Jasontroller above, but they are substantially smaller. I purchased these from eLifeBike and performed a teardown on my website as usual. The smaller size, especially when uncased, means it can be stuffed into smaller and more portable vehicles!
Their behavior, including the unwillingness to play with low-turn R/C motors and top speed limit, mimics the Jasontroller. Summary When you're ready to make the jump from super hacked and rudimentary to something more legitimate, a real EV controller is the way to go. Kelly is generally the most well-documented and supported mass production upgrade path, but there are options worth experimenting with if you want to go out on a limb and learn something about how motor controllers work. Step 9: Batteries: the Good, the Bad, the Ugly, and the Cheap While battery technology hasn't quite gathered enough punch to take on the automotive market, modern batteries are perfectly capable of powering personal electric vehicles. The focus of this Instructable will now shift to giving a brief overview of the types of battery technologies easily available to a builder, where to get them, how to charge maintain them, and most importantly, how not to use them. The primary topic will be lithium ion batteries of various types. Most of the details on battery types is better gleaned from the likes of Wikipedia, so I won't go into too much quantitative analysis. Forget about lead and nickel. Still the most common in small electric vehicles you can buy just because they're cheap (and heavy... does that make them the Chevy cars of the battery world?), there is absolutely no reason in my mind for staying with lead-acid batteries today. There's almost no point to building a light personal vehicle if it's going to be weighed down by 50 pounds of lead. Typical small 7 to 18 amp-hour SLAs have poor cycle life when used in vehicle situations, becoming unable to retain more than 80% of capacity in as little as 300 charge discharge cycles. Lead-acid batteries are more suited to long-term backup power provision...which is incidentally exactly what they're used for the most these days. Hint: Backup power systems usually don't move around. 
The upside is that they're pretty damn easy to charge, and lead chargers are very cheap. If you want to kick it old school, nickel cells are still a strong contender for personal EV use. They have 30-60% more storage capacity than lead and on a weight-for-weight basis can dump more watts (higher discharge ability) and charge faster. However, Ni cells are not the cheapest option any more - high capacity, low resistance cells are still expensive. A typical 4.2Ah, 1.2V nickel metal hydride cell can still cost $7 or more per cell, and you're going to need at least several dozen to make a useful pack. Many R/C vendors have completely dropped Ni cells from their lineup, but places like Batteryspace (and any other of the front companies for shady Chinese battery vendors) will have them. Battlepack is a reputable dealer who has served the robot community for many years. NiCd and NiMH cells are also very easy to charge and do not need battery maintenance systems, however. Lithium is where it's at. I had to think for a while before advocating publicly the use of lithium ion batteries, because it is rather easy to mess up with them. Along with the 100-150% higher energy density compared to lead-acids that lithium batteries offer, they also bring to the table less tolerance to over- and undervoltage, more propensity to enter thermal runaway when those conditions are reached, and the need for battery management systems or more careful charge monitoring. However, they are also the most versatile and potentially cheapest option (oddly enough) for powering your vehicle. Here's a brief overview of the subtypes of Li batteries. Lithium cobalt oxide, hard case cell (i.e. laptop batteries and their ilk): These are the classic "lithium ion" battery, and are 3.6 volts nominal, 4.2 volts charging. The most common form factor is the 18650 cell which measures.... 18 millimeters across and 65mm long. Typical cells in this size have a capacity of 1.8 to 2.4 amp-hours today.
I'm actually not going to focus on them too much, since you can generally only get loose cells (which need assembly) and they are also not built for high-rate discharge. Lithium "polymer" soft pouch cell I don't know whose idea it was to stuff a battery into a sandwich bag, but Lipolies are now the most common lithium battery around. They come in Cobalt Oxide and Manganese Oxide types, but the subtleties are negligible. The nominal voltage is 3.7 volts and 4.2 volts while charging. They are usually flat cells made in various rectangular shapes. Lipolies are now the standard in the R/C world, a fact I will expand upon. They can be made to have incredibly high discharge and charge rate tolerance, and are generally close to the theoretical maximum 240 watt hours per kilogram possible with lithium ion batteries today. Because they're mass produced hobby parts, they are also cheap - possibly the cheapest battery option you can get. However, being soft-cased, they are easily damaged, and can become very dangerous if damaged mechanically or shorted out. Their high discharge capability is also a curse, since short circuits can easily turn into electrical fires fueled by hundreds of amps. Lithium Nanophosphate (LiFePO4) The latest heavily commercialized lithium battery technology is the LiNP battery, whose poster cell is the A123 26650 (by none other than MIT-affiliated A123Systems). The "nanophosphate" part refers to using lithium-transition-metal-phosphate nanoparticles as the cathode. The short story is much higher discharge and charge rate tolerance than a classic Li-Co cell and much less propensity of lighting on fire. The one downside is cost and somewhat reduced specific energy density. If I had to name one reason why I would advocate LiFePO4 over everything else, it's the amount of abuse they can take for being lithium chemistry. 
While no battery is impervious to catching fire, overcharging an A123 cell is generally more palatable than an equivalent Lipoly cell - they tend to fizz and vent hot gas and electrolyte rather than shoot flames . There are certainly options for getting your LiFe packs premade, and I'll talk a bit about that. LiFePO4 cells are 3.2 volts (or 3.3... I find 3.2 easier to do math for) nominal, and 3.6 to 3.8 volts charging. All lithium chemistries require special chargers which use constant current constant voltage charging, and LiFe cells in particular require different termination voltages (A Li-Co or Lipoly charger cannot be used directly on LiFe packs). Loose cells vs. packs Alot of the time, you can find loose cells being sold next to battery packs. Due to the varying experience levels of potential readers, the physical dangers of working with loose lithium batteries , and the special soldering techniques needed to assemble a pack, I am not going to advocate people making their own battery packs here . Premade packs will already have individual cell taps (crucial for a future battery management system or charge balancer, to be details soon) instapped and wires pre-attached. In some cases, they come with a charger and integrated BMS. Step 10: Premade R/C Lithium Polymer and LiFe Packs This whole Instructable might be summed up as "How to Shop on Hobbyking", because that's exactly what I'm about to tell you to do. There are several reasons why I focus on HK's lipoly selection and LiFePO4 selection specifically. First, there are tons of them , in varying grades, capacities, and physical sizes. They are well-sorted into categories, and the customer review system is extensive. HK also has a good battery warranty system now. There was a time a few years ago when their batteries had poor quality control problems and overrated numbers. 
However, having gone through several packs (purposefully and accidentally), I generally believe their numbers to be reliable now. Oh, yeah, they're also cheap. Here are some things to watch out for when you're sizing up a battery to use. The C rating: In batteryland, the unit "C" is one battery capacity in amps. In other words, for a 1.0Ah battery, 1C is 1 amp. 10C is 10 amps, and so on. Batteries are almost always rated for a certain continuous and burst (order of several seconds) discharge rating. High C-rating cells have stronger, thicker interconnects and are physically bigger to handle the power throughput. Higher C packs will exhibit less voltage drop on high current loads. For a battery of reasonable capacity, you generally don't need the insane 40-50C discharge packs, which are comparatively more expensive. What I have found is that only the 30C type cells and higher have copper interconnects, which are crucial for high current sustained draw. Keep in mind that the cruise current of a small EV should be dictated by the maximum continuous current of your motor (which contributes to motor heating), and with the common 50-60mm motors should be 20-30 amps at the most, which for a 5Ah pack is only 4 to 6C. This doesn't mean you should get a cheap 15C battery, however, because the current demand on startup can easily exceed that rating and frequent excursions can cause excess battery heating and short life. The capacity: Sizing battery capacity is wholly up to the designer. The amount of energy a vehicle needs to travel a certain distance is given in units of Watt-hours per mile (or per km), and is the same thing units-wise as (inverse) miles per gallon, or liters per 100km. Many factors come into play here, including the cross sectional area of the vehicle (drag), whether you're going up or down hill, how inflated your tires are, how much you weigh, the roughness of the road, did you oil your bearings, etc.
In other words, there isn't really one definitive easy rule of thumb for battery pack sizing. However, one good resource to use as a lookup table is the Austin EV EVAlbum - you can search for vehicles similar to the one you are designing and see if Wh/mi data has been collected by other builders. Small electric scooters seem to average between 25 and 40 watt hours per mile. My own RazEr hub motor scooter is the low end of that (25 Wh/mi measured), and melon-scooter is a total battery hog due to its huge motor and knobby tires at 39 to 42 Wh/mi. Keep in mind that you can never get 100% of nameplate capacity out of the battery. Typically, a battery charge-discharge cycle in the EV world is defined as 80% state of charge to 20% state of charge . A Li battery is finished "fast charging" when the constant current stage finishes, and it is at approximately 80 to 85% full charge then. 20% is an accepted minimum safe discharge threshold. So really you get about 60% of the battery if it is used with regularity and not allowed to top off (say overnight). Multiple cells in parallel vs. multiple packs in parallel Some packs are available as multiple cells in parallel internally (indicated as "xSyP " where the P is number of cells in a group). While it is tempting to just put multiple packs of single cells in parallel, you have to have a dedicated charger for each pack, i.e. can perform charge management on each one individually. This is necessary in a parallel pack situation because there is no guarantee that the same cell in each pack will discharge at the same rate or to the same level. If the packs are charged all at once in parallel, then these cells could settle out to different voltages over time. Because Li batteries cannot tolerate overcharge or overdischarge, periodic balancing is then required, and having to balance multiple packs is a PAIN IN THE ASS . Guess how I know. 
This isn't to say it isn't possible, but you should be prepared to do some battery babysitting every once in a while. A better option is to get the largest single-cell pack you can. If you are making your own pack, put multiple cells together first such that they act as one large cell. Balance plugs R/C packs come with a small connector that breaks out the middle of each cell connection. This often used for battery balancers which may or may not be built into a charger - more on charging later. The most common connector is known as JST-XH , which is good because it uses a standard 0.1 inch (2.54mm) pin spacing. The connectors (as components) are extremely cheap on places such as Digikey, and you can also buy harnesses, adapters, etc. which combine packs in series or parallel. If you are running multiple parallel packs , make sure to grab a parallel balance adapter or two (example , with different connectors ) and always charge using it. Mechanical protection Lithium polymer batteries are soft and plushy, and HK's LiFePO4 flat cells are no exception either. R/C models are generally also soft and plushy, and so this is not a problem in their intended application. However, vehicles are usually not very soft. It is essential that the battery is mounted properly or inside a shell or protective housing. Always support the whole length of the battery Never just zip tie a lithium pack to something - the concentrated stress of the zip tie will damage the cell mechanically and could potentially lead to a pierce-like thermal runaway situation. Never compress just the battery between surfaces Just like too loose hanging can be bad, cramming the battery between two surfaces can be just as bad. Excess pressure can be placed on the roll-sealed cell edges and they can burst open, and then Bad happens. No sharp things around the battery Not only can they cut wires and destroy insulation leading to frame power faults, but sharp sheet metal edges can also cut open the cells. 
Keep the battery away from water, road debris, etc. Water (and if you're from where I am, salt and de-icing chemicals) seeps into the layers of the battery pack wrapping and can cause fast corrosion at the terminals, helped along by electrolytic action. Ideally, the battery should be in a hard case that fits it perfectly but transmits all the mechanical loads of the vehicle around the battery. If I can't stress this enough, then I can only say that people have made entire youtube careers predicated on setting lithium batteries on fire . Go watch enough of them - you will be more careful. Incidentally, HK has an entire line of hardshell packs for use in R/C cars (which are more liable to smashing into things). While I have not used them, I believe them to be good things. General Appraisal Based on vehicles I've built or seen built, a good starting point for an electric scooter is a 5Ah pack of 6S to 10S (which may need to be 2 packs in series), of 25 to 30C discharge specification. This can be gotten for about $100. If you take the cheap LiFePO4 option, this is closer to $150 or $200 (as of this writing). Step 11: A Sneaky Premade A123 Pack Option There's one segment of the consumer market where A123 cells are sold in packs with integrated battery management systems and bundled chargers and used every day by people who aren't electric vehicle nuts. That's the high-end cordless power tools market. Specifically, the DC9360 36 volt lithium ion battery for DeWalt's 36 volt power tools line has been the subject of great praise by the crazy electric bike guy community. A DC9360 battery and charger (Dewalt part number DC9000) set can usually be obtained on Ebay or Amazon for $200 or so, and the battery itself around $120. For 10 A123 cells with an integrated battery manager, that's quite a steal at market prices. It is much more expensive than the Hobbyking option, but A123 cells are serious quality. 
The DC9360 does need a bit of hackery to turn it into a full fledged EV battery, however. BMS Hack: The battery has three terminals: 1. Positive, which is connected directly to the cells inside. 2. Negative terminal connected through a hard 15 amp fuse inside the BMS module, distinct from the fuse described below. Pulling more than 15 amps from this would just blow the fuse. 3. Another negative terminal connected through an internal power MOSFET. Several hacks have been documented on the BMS module's internal workings which indicate this FET is used as a speed controller for the tool motor. A document which details how to connect to the BMS such that the FET is turned on (allowing the higher current output to be used) is here. It also describes a multi-battery "ORing" system with which any number of batteries can be paralleled and swapped in and out. I can't vouch for that circuit, however, since I've never built one. Another option which has been validated is to simply solder wires directly from the positive terminal and negative terminal of the cells, completely bypassing the BMS for discharge purposes, but continuing to use it for charging. This operation usually leaves a discharge cable exiting the rear of the pack and leaves the tabs and connections on top unmodified. Fuse Bypass: One cell interconnect inside the pack is deliberately made narrow so it acts as a fuse if the current draw becomes too high or the battery is shorted past the BMS. The location of the battery-level fuse is detailed in this RCGroups post (the post itself describes the complete removal of the BMS module, which is great if you want to make your own packs, but otherwise not helpful). This narrow area can be bridged using copper desoldering braid, grounding braid, or just some pieces of wire. A large soldering iron (60 to 80 watts) with a high thermal mass tip should be used to solder the reinforcement without heating up the cell too much.
Many resources on the Intertubes have been compiled by Crazy Electric Bike Guys about chopping and screwing these batteries - here's another one. Step 12: *NEW July 2012* LiFe Lead-Acid Replacement Modules How I love progress. In the past year or so, several battery companies have produced "Lead-acid replacement" packs which are essentially LiFe cells in a box, with charge management circuitry that makes them compatible with conventional "dumb" lead-acid chargers. They generally feature auto charge cutoff, internal cell balancing, fused or protected outputs, and other nice features which makes your electrical systems plug-and-play. The downside is, of course, added cost. They're also not the best in terms of energy density, of course, because they're designed to fit into applications for existing industrial battery sizes. I've listed some possible vendors below along with commentary and use considerations. K2 Energy My current favorite for SLA replacement bricks because I've actually had the chance to use them and they're commercially available, these are from a somewhat under-the-radar battery company called K2 Energy, but they've actually got product. Nice product. The models in question are the K2B12V7EB and K2B12V10EB, both of the "7Ah" lead-acid battery format. The 12V10 has an extra cell in parallel for higher capacity in the same package. These modules are rated to 25A discharge with 40A peaks, which is more than enough for average cruising vehicles, but may not be sufficient alone for a drag racer setup. It's important to note that they come in a version with internal battery management module, as well as without. The "B" in the part number indicates the presence of the BMS board. The plain version (e.g. K2B12V7E) is, as far as I understand it, just a box of cells. The cost is usually $30-40 less than the BMS version, but if you are just getting a box of cells, then the cost per watt hour is not very competitive at all with HK lithium iron phosphate packs. 
I've taken apart one of the 12V7 modules on my site because I was curious about the internal layout of a SLA replacement module, so if you also want to take a look inside, the post is here. These generally retail for $140 for the 12V7 and $170 for the 12V10. The watt hour per dollar cost for the 12v10 is actually much lower: about $1.40/Wh for the 12V10 compared to $1.70/Wh for the 12V7, in the same package size, so I consider the 12V10 superior in this regard. A123 Systems My electric vehicle lab class in the Spring of 2012 was made possible in part by A123's donation of a whole set of ALM12V7modules. They're A123's own house-developed SLA module with some of the highest discharge ratings around. They have an externally accessible fuse, too, which is nice because if you blow up the internal BMS on any of the other modules, the whole module is trashed. If the fuse blows, the BMS shuts down until the fuse is replaced and then charging voltage is applied to the battery, upon which it will wake back up like a Pokemon. For the student-built vehicles, this happened several times and I'm kind of glad that the fuses are there as a result. I'm now going to stop singing the praises of A123, no matter how nice they were to us, and point out that YOU CANNOT BUY THESE. A123, in my opinion, suffers from "classy American company disease" where the company absolutely refuses to do business with end users and only seeks large supplier contracts. You can't buy the ALM12v7 online or really through any supplier - if you search ALM12V7, all the links consist of user guides and people who have gotten them secondhand or from insiders. A123 has done nothing to market these products to the people who will actually be using them in the end. No matter how good the product is compared to others on the market, they're impossible to get and therefore might as well not exist. But just in case they wise up, I'm going to leave this space to link to vendors of the ALM12V7s when they pop up. 
*NEW* November 2012: Yes, now you can buy them! The aptly named buya123batteries.com website is the embodiment of the company's efforts to actually, like, sell things. The ALM modules as well as loose cells in 18650 and 26650 size are available. Generic / unbranded SLA replacement modules There's a whole other world of shady Chinese batteries out there. Battery Space, my favorite Chinese battery front company, has an entire page of them. These likely use the ingredients sold separately elsewhere on the site (cells, PCM boards). I have neither seen these or used these in person, so I cannot recommend them either way. Many of them have seemingly low discharge rates for their capacity which may make them less suitable for vehicle use. Step 13: Care and Feeding of Your Battery Unlike nickel cadmium and lead acid batteries, you can't just throw a lithium pack onto a power supply with a fixed voltage and trickle charge it until it's finished. For long life and safe charging, a lithium battery needs a charger that was specifically designed to charge them. Typically, you also need a system to keep the individual cell levels equal, a process termed balancing. The same goes for LiFePO4 as it does for LiCO and lithium polymer packs. Fortunately, since 2007/2008, there have been massive reductions in price and increase in accessibility for fully integrated charger-balancers. R/C Hobbymultichargers with integrated balancers I use the term "multicharger" just to refer to a charger which can charge and manage most battery chemistries. These are by far the most common and the cheapest type out there right now. The cost is dependent on how many Li cells you want to charge. The cheapest (such as this thing ) can usually charge 6 cells in series, that particular one at a rate that is limited by its 60 watt power - 6 cells (22.2v) means a charge rate of about 3 amps instead of the maximum of 6. High voltage (10S capable) chargers are much more expensive. 
I have the IMAX 1010b+ and am very satisfied with it (it's my main robot AND vehicle charger), but it's quite pricey. The new 3010B+ seems to have the same capabilities, but with even more power dumping ability. All of them need a 10-18 volt (12 is the usual) power supply as the input, which may raise costs depending on what you already have or want to buy. These chargers can be set up and left on the battery setting you use the most. They will then retain that setting after a power cycle - so really all that needs to happen is plugging it in. They have built-in balancers and cell voltage monitors. Overall, getting a pile of these seems to be the most economical option since the solution is all-in-one, or perhaps all-in-n if you have a complicated electrical system. If you obtain an isolated 12 volt power supply for each small charger, you can literally just connect the outputs in series to charge especially weird battery configurations. I explore this in making a 4-channel series charger for a quadrotor project. The supplies must be isolated (notice how I keep bolding that word isolated ) so the chargers don't interfere with or destroy each other as soon as they're plugged in! Another multi-channel multicharger project is documented at the Jedboard blog . Dedicated single-voltage charger with offboard BMS or balancer There are plenty of battery houses that will sell you a dedicated single-voltage charger, with my eternal example of "shady-ass battery vendor" BatterySpace's selection here . They are really not much less expensive than an R/C charger and you lose the flexibility of changing cell counts and integrated balancing, but they 1. plug straight into mains power and 2. can be the only option if you have very high cell counts like 12 to 14S and want a decent charge rate too. Alternatively, 3. you know it's the only battery config you're ever going to use.
Besides nameless dedichargers, constant-current LED sign driving power supplies have been pressed into battery charging duty. They are tiny CC/CV power supplies in a box - LEDs being current-mode devices, they keep the current limited to a maximum and allow the voltage to swing to a preset point. When the voltage is reached, current is allowed to taper off and vary. If that sounds like how you charge a Li battery, that's because it is. The MeanWell PLC and HLG-A series have been explored for use as battery chargers at MIT. If your battery is conveniently near one of the available voltages, Mouser Electronics is a reliable vendor of them. You will need an external BMS or at the very least a cell monitor. One neat little device is called the CellLog 8M (which doesn't actually seem to support datalogging) which you plug into a balance port on a battery and it will monitor the voltage on the individual cells - and alert you using an annoying buzz if any go over or under preset Li battery thresholds. At the least, that tells you to stop charging . Quite a few "kit" BMS systems for EVs are available. I can't say I have ever messed with any of them, since I have always used the first route. The "MiniBMS " project is one example which was developed by several electric vehicle forum members. Dedicated single-voltage charger with manual balancing for A123 cells Truth be told, balancing is not an activity you perform frequently. It is more common with cheap R/C equipment and batteries because those cells are generally not matched before being assembled into packs, since that QC step takes a lot of time (read: costs a lot of money) and most users of the packs will have balance-enabled multichargers. However, if you are running A123 cells, the MIT electric scooter flock has found them reliable and well-matched enough such that balancing is only something you do every few weeks to a few months; and then only after checking the cell voltages to see if it's even worthwhile.
For instance, on melon-scooter, I take the batteries out and check the cell voltages every semester/term. Generally, if the pack was well-balanced the first time (which a multicharger helps immensely in), the cells will not be too far apart. Mine have always been 0.02 volts or better matched at every inspection, but I've thrown them on my charger anyway because it's there. With the manual balancing system, the whole pack is charged until the Constant Voltage Stage cutoff - i.e. the final charging voltage has been reached and the current tapered off. The whole pack is taken off the charger and allowed to settle. Then, either low cells are charged one-by-one to match the rest of the pack, or high cells are drained using power resistor loads to match the others. Step 14: Electrical System Loose Wires So at this point I've covered most of the major electrical components of a small EV. I've basically laid out how to size motors and which ones are my favorites, which controllers (and control school of thought) to use, how to make sure you don't start a battery fire, and probably am making Hobbyking very nervous right now. I don't care, because I've blown my money there at least once a month for the past year or more. I think they kind of deserve it for that. What's left is random links and tips to other things i've found helpful for putting together an electrical system. Don't use a cheap Radio Shack rocker switch as a main power switch. Get a real high-current key switch or similar (the Hella key switch is very popular along with its Giant Red Knob cousin). A cheap rocker switch will erode very quickly from power-on sparking unless you have built a precharge circuit or inrush limiter (both helpful terms to search). Furthermore, even though a switch might look kind of beefy and metallic like figure 2, the contacts inside are still very small and can only pass 15-20 amps without overheating. 
Besides, every watt of heat going into cooking the switch is one that is not making it to the motor. Here's a whole asston of power switches from the RobotMarketPlace. Note: you should avoid the Hella knockoffs - I've measured the contact resistance of those things at upwards of 15 milliohms. This doesn't sound like much, but that's potentially 30+ watts of heat at 50 amps continuous! For lower powered vehicles, sometimes jamming the main battery connector is enough as a switch. A robust connector with a shroud like the XT60 type should be used here - while the common Deans "T" offers low resistance and compact size, I and others have found that they are difficult to insert correctly and you often miss multiple times (hence power cycling the system rapidly multiple times - not good for the electronics). Do NOT skimp on wiring. It might be tempting to just get some 16 gauge house wire to finish everything, but really on a vehicle your wiring runs should be as short as possible and as fat as possible . Long runs must be a heavier gauge than short connections. Every wire in a vehicle is technically a resistor, and resistors burn off power. Get some high quality "noodle" silicone-insulated high-strand-count copper wire while you're shopping for motors and controllers. The usual recommended capacities for copper wire are a good guide to follow, but keep in mind these are continuous amps with no surge - push 100 amps through a few feet of 14 gauge wire briefly and you will lose several volts. For instance, on melon-scooter, my main battery bus wiring is twin 10 gauge (i.e. 2 10 gauge positive, 2 10 gauge negative). This ensures the least resistance going into the controller that I can reasonably achieve. There's no need to use 4/0 wire on everything, but 12 and 10 gauge should be making up most of the main power distribution unless the vehicle is very small or low powered. Shield or twist all signal connections and keep them short.
A vehicle controller is an electromagnetic nightmare because it's usually hard-switching dozens, if not hundreds of amps at several thousand times a second. It's a really hardcore, scientific way of touching 2 wires to each other. The result is a lot of EM noise broadcast in the local area. Long signal wire runs, such as throttles and motor Hall sensors, can pick up this radiated noise and it can cause interference at the controller. This often manifests itself as twitchiness under heavy acceleration or rapid throttle changes. Twisting wires together is one way to mitigate the noise issue. Another means is to run sensitive wiring, twisted together, through a conductive shield tube that is grounded to the vehicle frame or controller case. Small EV throttles are about as standardized as.... you know what, I can't even think of something so unstandardized. If you have a mystery throttle, chances are these days that it is a linear Hall Effect type sensor, not a potentiometer. If it only has 3 wires, you are in luck. One will be black, blue, or brown . Or some other dark color - this is generally ground (0v). Another will be red, orange, yellow... or some bright color, and is 5 volts input. The 3rd wire is generally green, yellow, or white - notice that there's a lot of overlap, and is the signal output. These aren't hard rules - I have actually seen white be the logic power before, and orange has once been ground. If you have 4 or 5 (or more) wires, the same guidelines are helpful. It's almost better if you start with new commercial throttles (examples ). I personally favor thumb throttles over full grip ones since you can brace your hand against the handlebar with them. Here's a great example of a custom-made thumb throttle! Get a stiff 12v accessories bus because you can run any multitude of obnoxious lighting, sound systems, and... maybe like, control logic or something... off it.
A DC-DC Converter which has your battery voltage within its input range and 12-15v output is desirable. These are available all over eBay - make sure to get the chassis-enclosed modules instead of open-board or board-mounted types. Some permanent vendors also exist, such as CurrentLogic . Notice that a lot of "Discount EV" involves shopping away from mainstream vendors, and performing much eBaying. Lots of these "discount" parts vendors don't have permanent businesses or their own product line. I'm more than okay with this, though it can be difficult for some people to wander off the path of Amazon et al. Step 15: Mechanical and Drivetrain Resources The vast majority of small EVs use either synchronous belt ("timing belt") or roller chain drive. If you're designing a drive system that isn't one of those two, I assume you already know what you're doing. This section will discuss the pros and cons of the two styles for people on the fence about drive components, as well as highlight a few places to get them. A good source of reading on any type of power transmission ever is the RoyMech website. In fact, they're great for reading about anything remotely engineering-related ever. Roller chain, the proven method Chain drives are by far the most common method for dynamically engineered vehicles since it's adjustable in length, very cheap, and strong. Chain also tolerates a degree of misalignment between sprockets, and is much narrower than a comparable belt - important for working in tight spaces. If you're whipping together something quickly, chain is probably the way to go. Avoid bicycle chain While it may be tempting to just grab chain from a junked bicycle, bike chain has very thin sidewalls compared to industrial power transmission chain, and usually hollow (and flexible, to enable derailleur shifting) rivets. The result is a chain that can easily break when subject to the loads of an electric motor as compared to a human.
Chain size Common small chain for vehicles are ANSI #25 and #35, the leading number of the specification being the chain pitch (inter-link distance) in units of 1/8" (0.125"). By far, #25 is the most common, the cheapest, and can reliably handle 1 horsepower or so, more with reduced lifetime. Actual chain life depends strongly on the load and tension - gory engineering details can be found in places like The Complete Guide to Chain , but you can expect #25 to work for most purposes. Higher power systems, such as those over 2 to 3 horsepower, would find #35 helpful - as will systems which suffer from poor alignment and tension, due to the larger teeth of #35 sprockets. Unless you're making some kind of drag racing vehicle, #40 chain is utterly too large and heavy; it's used more for go-karts and mopeds. Chain tools To effectively work with chain, a worthwhile investment is a high quality chain breaker. These little things cost around $20 to $50 and have an adjustable screw handle to force out the chain rivets and let you cleanly separate chain and remove and add links. The type shown in the image is the most common, and is usually advertised to handle #25 to #60 chain sizes. However, I find that they are a little too large to reliably press #25 chain rivets - the punch tends to slip off the end of the rivet and deform the plates, which weakens the chain. Chain and sprocket vendors My favorite go-to place for chain and sprocket is Surplus Center . I consider Surplus Center to be like the Hobbyking of random power transmission hardware and farm equipment - they have a little bit of everything . Most of their power transmission parts are actually new and not actually "surplus". They have sprockets with hubs (useful on the motor side) as well as hubless plate sprockets for custom rim attachments. Oh, and bearing (mounted or unmounted)...and shafting, and shaft collars, and everything else you might need to build a random vehicle in one stop. 
The only thing they don't really have is timing belts. Remember to pick up master links and adding links for your chain. Another useful place to look is the very aptly named ElectricScooterParts sprocket division . I would recommend ESP to anyone seeking a complete matched set of sprocket and wheel . If you don't have fabrication facilities available, coupling a sprocket to a wheel may in fact be difficult. Of course, the trivial answer is always McMaster-Carr - who has updated their website to be more friendly to people not knowing exactly what they are looking for. Their prices are very competitive with Surplus Center in the common pitches and sizes, and for most people in the Continental US, McMaster somehow delivers your order 5 minutes before it is placed. Random trivia: Chain is one of the only power transmission components in existence where metric specifications are based in inches. Metric chain is specified in units of 1/16" (0.0625") ... so a #4 metric chain is a #25 ANSI chain. #5 metric chain, sometimes called "8mm chain", is very rarely seen in some mass produced scooters and other small rideables. Timing Belts I'm personally a synchronous belt fanatic, since belts are quieter and less messy than chains. They do, however, require much more precise alignment both angularly and axially on a shaft. Timing belts also must be run under the correct tension. They can be marginally more efficient than chain (on the order of 5% for well designed systems), but this difference is in my opinion negligible for small vehicle use. Belts come in different widths, and the wider the belt, the more power the teeth can reliably transmit. Belts cannot be fudged as much as chains in this manner - in belts, the power transmission medium is made of little rubber and fabric backed nubs, which will be smeared and mashed if too much power is put through them at once. ANSI sizes The most common U.S.
timing belt size is called XL (apparently for Extra Light, since the really big ones are XH for Extra Heavy) and has an inter-tooth spacing of 0.200". The ANSI type belts have trapezoidal teeth and are an older design. McMaster-Carr has a decent selection of XL pulleys and belting because of their prevalence in U.S. engineering. Because the vast majority of small EVs are foreign-made, XL belts are actually quite rare in that field. Metric HTD type belts The HTD belt is a staple for small power transmission uses. They have round (semicircular) teeth and were engineered to transmit more power in the same belt size. HTD belts come in 3, 5, 8, and 10mm pitches (Real metric this time!), and the most common by far for small vehicles is the 5mm size. Typical 5mm belts come in 9 and 15mm widths - the 15mm being able to transmit that much more power. A metric belt is specified in a fashion similar to 535-5M-15. The first three digits are the total length of the belt loop in millimeters. To find the number of teeth, divide this number by the pitch. "5M" indicates this is the 5mm pitch, and 15 indicates the width. So this is a 107-tooth 5mm belt that's 15mm wide.... the most common individual size found in scooters. Metric GT type belts The GT belt is another round-tooth metric belt specification produced exclusively by Gates Corporation, a prolific rubber products manufacturer which invented and popularized the V belt several millennia ago. They are interchangeable with HTD belts of the same pitch. Get some belting Being more "high end" or "precision" components, places like Surplus Center and McMaster don't have good stocks of HTD type timing pulleys and belts. While ElectricScooterParts does have a selection of common belting for scooters, I find them to be horrifically overpriced - probably just taking advantage of people seeking hassle-free 1:1 replacements for a broken part. I don't blame them, but I wouldn't get stuff there either.
However, if you're just messing around or building your own driveline, the place to go is Stock Drive Products . I have personally sworn off SDP until they update their web store to be less 1990s, but they are great in general and their metal timing pulley selection is extensive. Another belt house I've found is B & B Manufacturing , whose prices seem to be a little more competitive in the common small sizes. What belts McMaster-Carr do have in 5mm HTD, however, are very high quality (Kevlar-banded, Goodyear rubber). Hub motor drives Okay, I'm not going to let any scooter-related subject pass without mentioning hub motor drives, which I would pick as my "area of research" if you really pressured me to be academically productive. The MIT Electric Scooter Team has recently seen a surge in the number of hub drive powered vehicles. The motors, unlike those featured in my Just Make Your Own Hub Motor Already Instructable, are based off the "layered 2d plates" principle explored by Kitmotter and piloted by the BWD Scooter and the Jedboard . Here's one vehicle with a hub motor in progress, and here's a completed one (easily the stealthiest thing ever). Step 16: Good Mechanical Ideas This section will address some small mechanical details that can be headaches to those who are not machine shop endowed. If you do have metalworking tool access, then attaching two things together or changing bore and shaft sizes is generally a trivial operation. But I started building vehicles when I did not have extensive shop access, so the following advice is mostly aimed at the starving vehicle hacker. Set screws may suck, but they work well if done properly An old adage in the combat robot community I have been part of is "set screws suck". They rattle and come loose, they mar and damage shafts, they strip out, and are generally horrible to deal with. However, I contend that a large part of the disdain that robot builders have towards set screws is caused by them doing it wrong . 
While the average set screw you may receive with a pulley or sprocket can be tiny - the common thread on a 5M-15 motor pulley being #8-32 or #10-32 (metric 4 to 5mm) - what's more important is how the set screw is seated in the shaft. If a set screw only has a round shaft to grip, then all of the transmission forces must pass through a very tiny tangential contact patch. The result is the familiar marking and ring-shaped groove that forms when (not if, when ) the set screw slips due to shock torque or overload. To effectively use a set screw, it must have a flat or a dimple in the shaft to grip . A flat can be filed or ground and a dimple can be drilled using the pulley or sprocket's set screw hole as a template. It doesn't have to be deep nor expansive - any concavity will do. The contact patch is changed from a small tangential deformation to a much larger solid material interference fit. Now, instead of just overcoming a small ring-shaped patch of metal, motor loads have to essentially crank the entire set screw through the shaft. Another important but often overlooked detail with set screws is that they must be as large as possible to obtain as much face contact area with said flat or dimple as possible. Too small a screw can just shear off or indent the shaft and become loose. While having a flat or dimple is more important, I often take the opportunity to "upgrade" the set screw size on a pulley or sprocket - especially pulleys, because sprockets tend to come with keyway bores I can take advantage of. I've done ridiculous things like use a 5/8" set screw on a 3/4" diameter shaft, and just about the minimum I use on a standard 10mm diameter shaft is 1/4"-20 (or M6 coarse thread). Bore changing - large to small One of the hardest parts about putting together the power transmission system of a vehicle is how to attach the initial chain sprocket or pulley to the motor. For those with a shop at their disposal, bore adaptation and remachining is a trivial matter.
But otherwise, you'll have to pull some tricks and hacks to adapt the bore of the sprocket or pulley to be mountable on a motor shaft. If the shaft is smaller than the sprocket or pulley bore, it's relatively easy. Plain bronze bushings ("bronze bearings, oilite bushings") can be used as a shaft diameter adapter. You'd have to find one with an OD that fits the pulley or sprocket bore and an ID that fits the motor shaft. If both are metric, which is often the case, this is relatively simple. Metric bronze bushings are available now from most industrial suppliers, including McMaster-Carr . If the pulley or sprocket has a simple cross-drilled set screw, then you can use the existing set screw hole as a template to manually drill through the inserted bushing. This hole is important, since having the set screw press against the bushing is an ineffective way to transmit power - a solid mechanical connection must be made between the pulley or sprocket and the motor shaft. Aluminum can shims The boring soda can is in fact a very precise source of shim material. Can wall thickness generally varies from 0.003 to 0.004" near the center to 0.006 inches near the base (but not the rolled base itself). So, if you cut a sliver of drink can and stuff it into a misfitting bore, you can take up slack of around 0.01" diameter. Bore changing - small to large For operations where the bore of the pulley or sprocket needs to be enlarged , this is a much more difficult affair to the non shop endowed. Drilling out a bore is almost certainly guaranteed to render it off-center and wobbly. There is no technique that I know of which can use garage tools (up to and including a drill press) to create an on-center non-wobbly new bore in an existing old one. If you know one, please share it. One option is to use reamers if the diameter is close - for instance, 3/8" to 10mm (0.375 to 0.393 inches), or 5/16" to 8mm (0.3125 to 0.3145 inches). 
The multiple straight flutes of the reamer tend to keep the reamer centered in the existing bore, but this still only works to a limited degree if it is not started straight to begin with (i.e. you're chucking it in a hand drill). However, the reamer itself can be expensive - $15 to $30 depending on type and size For large bore size changes, like 8mm to 12mm - very common for fitting a 5M-15 HTD pulley to a large outrunner, metalworking machinery is practically indispensable. You may be better off asking a friend with machine tools or getting to know an area machine shop. Such a large change requires multiple "passes" of larger drills and reamers, and without a fixed center on a metal lathe, each pass causes more concentricity loss. Imperial drill bit sizes that are almost metric These drill and reamer sizes are close enough such that you can fudge around and make a fit if the correct size is not available. A #9 drill is 0.1960 and is a spot on slip fit for 5mm shafts. A 15/64" drill is 0.2340 and creates a tight press fit for 6mm (0.2360) shafts Likewise, a B drill is 0.2380 and creates a loose slip fit for 6mm shafts 5/16" is within 0.002" of 8mm (0.3125 vs 0.3145 ) An O (that's the letter O) size drill is 0.3160 and therefore will make a loose slip fit on 8mm shafts 15/32" is almost 12mm , but the difference is 0.004" and is too great to create a press fit. 19mm is almost 3/4" (0.748 vs 0.750 ) Here's my favorite wire gauge and drill size chart for those of you interested in the hard numbers. Gap-filling retaining compound is your friend If you're in a situation where there is a few thousandths of an inch of diametric gap, it's not quite the end of the world. Thick retaining compound is beneficial here. The difference between threadlocking compound and retaining compound is that the latter is designed for use in situations where there aren't threads. Loctite type 635 and 638 are very thick retaining compounds that can fill gaps of 0.01" or more. 
Type 609 is very thin and my favorite for sticking bearings in pockets where they should be. This does not mean you should rely solely on Loctite for power transmission - a set screw or similar should still be used, but the Loctite, once set, will bridge the gap and make the joint less prone to wobbling. Step 17: Wheels For the most part, you'll probably be picking a wheel off ElectricScooterParts or similar - their wheel selection is pretty much unbeatable, especially for matched sets of front and rear with drive sprockets. I would recommend, however, taking a trip through the likes of eBay and searching for the wheel size first. Non-pneumatic wheels Most small scooters have 100 to 150mm solid rubber or polyurethane wheels. These are often not designed to be driven (are caster wheels) however, and myriad methods exist of adapting them to drive sprockets or shafting. If your means of fabrication are limited, it might not be wise to start with a caster wheel. In the 125mm range, scooter wheels seem to use either a 7 or 12 spoke design. The latter type, which I've termed "Yak" type since that company seems to have made them first, is the most amenable to limited-resource hacking of sprockets and pulleys because of the even number of spokes. In the second image, one hack I did for my 2007 scooter is shown - it uses pins that are pushed into the spokes, with the spacing such that the pressing friction alone retained the wheel sideways. For those fabricating spacing plates at home, these should probably be bolts with washers instead. In the 140mm (about 5.5") range, there are several options for wheels that come with pulleys or sprockets mounted. A fairly common type uses a four-hole spoke design and has a 5mm pulley attached. ElectricScooterParts P/N #WHL-100R and WHL-101R are examples, but the type is also available from a range of dealers. 
Otherwise, the Razor E100 rear wheel is easy to find as a stock replacement part and is also a 140mm wheel, but has a #25 chain sprocket instead. *New* February 2012Colson Caster wheels Colsons have been a staple of robotics for ages, but they also make very good vehicle wheels. They're non-pneumatic, but at 65A durometer hardness they're rather soft (the average urethane scooter wheel is 80A and skateboard wheels are usually over 90A hardness) and give a much less bumpy ride. They're also easily machinable because the wheel cores are made of polypropylene plastic. If you need a weird wheel size like 3 or 4 inches, say for an electric skateboard or longboard, they're worth looking at. Razor Wind is a scooter that uses gray 4" Colson wheels. RazEr Revolution, one of my own steeds, was recently switched over to using 5" black Colson wheels. Pneumatic wheels If you don't want bone- and component-rattling death, it's wise to start with a pneumatic wheel. The most common small scooter wheel is the 200 x 50 (metric) or "8 x 2" (U.S./imperial) wheel, and ESP has those with 5mm-HTD type pulleys built right into the rim, such as their product number WHL-220R. The tooth count on these integrated pulleys is generally 72 teeth. Monster Scooter Parts also sells 200 x 50 wheels with 56-tooth #25 chain sprockets, an OEM part for the Razor E200, alongside the 5mm HTD belt kind. One excellent resource of small pneumatic tires is Caster City . Discovered by my partner in small EV crime Shane, they have rare 6 inch pneumatic tires (which he has adapted for hub motor duty in the Pneu Scooter ). These 6" pneumatics might be handy if you're seeking a very small vehicle frame but want the ride smoothness that air-filled tires offer. *New* November 2012: Harbor Freight, Northern Tool, and other Discount Tool Caster Wheels One very inexpensive source of 6 to 10 inch diameter pneumatic tires are discount/import tool shops such as Harbor Freight and Northern Tool. 
These wheels are made to be casters only, so they have bearings in the hubs and there is no way to attach a sprocket directly to the hub. However, the hubs are bolted together, and several builders have used these bolt holes as a chance to locate a plate sprocket or belt pulley using standoffs - little round spacers. An excellent example of adapting a sprocket to a pneumatic caster wheel using this method is Amy's Blitzen bicycle. She used a custom welded standoff-hub, in part because the rear sprocket was required to match a reasonable pedaling pace and could not be made larger. The only down side to these wheels is that they may be of lower quality than a comparable wheel sold through a reputable industrial supplier. However, being so cheap, it's hard to argue. There's nothing much I can say about wheels - if you try hard enough, just about anything that is round can act as a wheel. Live shaft vs. dead shaft It's harder to convert a nondriven wheel into a wheel with a sprocket or belt pulley on it, due to the need for large-diameter concentricity, than it is to just stiffly attach the wheel to a rotating shaft. Therefore, it might be easier to adapt a nondriven wheel to a live (rotating) shaft. In this case, the wheel and axle would be suspended by pillow blocks (bearing blocks) on either side. Small vehicles often use the rear axle as a structural member to prevent the forks from wobbling sideways. If a live shaft is used, this structural member is lost, so the forks should either be reinforced from bending or bridged (connected) at another point. For small wheel hubs, the Robot Marketplace has a selection that matches some of their pneumatic tires. Another option is known in the power transmission world as a "keyless bushing", which functions kind of like an adjustable collet for a shaft. McMaster has a selection of keyless bushings (search those terms). 
These have a bolt circle you can directly drill into a wheel and retain it, and tighten onto a shaft usually using the same bolts as a clamping mechanism. Another popular live shaft transmission part is the TranTorque bushing, produced by Fenner Drives, which expands its outer diameter and reduces its inner diameter when tightened, thus coupling two parts together. They are found through almost all industrial suppliers including McMaster. *NEW* July 2012: Turning caster wheels into drive wheels Many small caster wheels that can be used on vehicles have their own built-in bearings. With some judicious use of digital fabrication services (waterjet machining, 3D printing, etc.), you can adapt caster wheels to become belt- or chain-drive wheels pretty easily by using their built-in bearings as a crude alignment jig. This method was explored and used in creating Chibikart (Steps 10 through 12 show how the custom designed sprocket was mounted to the wheel). Step 18: Fabrication Resources for the Machine Shop Deprived I keep forgetting at times that not everyone is spoiled by constant machine shop access. Many of the tricks I've developed to deal with fussy parts and assemblies are useful only to people who are in similar situations as me. Specifically, if you know me in person, you know that my favorite anything ever is the abrasive waterjet machine . Almost everything (okay, fine... everything) I build these days seems to be made primarily of waterjet-cut parts erected 3d puzzle style. Same deal with laser cutters - I've become well accustomed to so-called 2-dimensional fabrication techniques because they are fast and easy when Apple-like appearance and Built Ford Tough strength isn't the priority. This is actually a good position to be in, because recently, many services have sprung up across the Internet that will take your part design and actually make it . And unlike large shops and many machine services, you don't have to be making one million of something either.
The general name for this kind of stuff is "personal fabrication". Waterjetting, laser cutting If I haven't turned this into a single large product advertisement yet, I'm about to. Currently, the only go-to place for online waterjet part ordering is Big Blue Saw . I can't recommend these guys enough - until others open up to compete, it's the only place I can direct people when they want something waterjet-cut and cannot access a machine in their area or on campus. Many MIT student projects have also used BBS. The turnaround time is short and prices include materials . This is key: buying materials in bulk, huge plates saves money and lowers unit cost of parts. For precision metal parts, there's essentially no better option. 3D printing Another personal fabrication resource that I have taking a liking to is 3-dimensional printing. Not just because I have my own so I insist on 3d printing everything (everything that isn't waterjetted), but because there are also plenty of companies which will take your 3d design and pop out a plastic (or metal) version of it for you. Shapeways and Materialise are two reputable 3d printing houses. Structural vs. nonstructural plastics One important distinction that must be made is the type of material. Most structural parts (i.e. usable mechanically) made on 3DP machines are made of ABS and Nylon (Polyamide, PA) plastic, high-impact thermoplastics. The most common process is fused deposition modeling , which produces immediately usable ABS parts. Nylon parts are typically made with selective laser melting/sintering . Of the two, ABS tends to be cheaper. Shapeways calls ABS "Gray Robust" and Nylon "White, Strong, and Flexible". Materialise just straight up indicates their material as ABS and PA. For structural parts, avoid photopolymer resin or similar - they're generally decorative, but the processes are capable of very high detail. How can 3d printing be used on a vehicle? 
You can make weird integrated spacers or hubs using them - ABS plastic is usually strong enough in bulk for the purpose. Custom motor mounts are also a possibility. That motor sensor holder for the 63mm class motor that I showed earlier. Design-for-waterjet Through fighting my addiction, I've accumulated quite a few tricks for putting things together using solely waterjet-cut parts. I've compiled some of them in a document I wrote for the MIT 2.007 class, called "How to Build Your Robot Really Really Fast ". This file is hosted on my website, which is worth a look (in the projects sidebar) if you like seeing how much waterjet gluttony I am actually guilty of. It's slightly outdated - I've gradually transitioned to the full cross style of t-nut, so it might see updates to reflect that. My 3d printer , a Makerbot gene transplant, is full of waterjet and laser edge-stitching and T-nutting. Another cool use of waterjet-cut plates is using square tube stock and making clamping plates to fit parts on them. Amy's tinybike is a great example of a single tube construction with waterjet-cut parts clamped to it. Shane's "Tinykart" uses waterjet-cut plates in conjunction with 80/20 structural framing stock. This thing is incredibly stiff, and I'd recommend 80/20 if you can find it cheaply - buying new is quite expensive. Step 19: Conclusion and Epic Resource Dump I have somehow managed to write yet another long Instructable without telling anyone how to make anything. That's fully intentional - this guide is intended not as a manual to build a specific vehicle, but instead to list some of the resources and guidelines I have found useful. Nothing I have said here is backed up 100% by engineering principles or math - it's all experiential and rule of thumb based. Now, go off and abuse model airplane parts to make ground vehicles. 
Here's a recap of the big vendors of parts I linked to repeatedly: Electric Power Systems Hobbyking : Motors, batteries, chargers, controllers, wiring, connectors... How about "everything-minus-drivetrain"? Kelly Controller : EV-grade controllers, throttles, user interface, some motors. Robot Marketplace : More robot than vehicle, but good large DC motor selection, some controllers. Battery Space : My favorite shady Chinese battery front company. eLifeBike: Shady Chinese company with good stuff. Drivetrain and Mechanical Surplus Center : Cheap sprockets, chain, shafts, bearings, wheels... "Everything-minus-Hobbyking". You can build a vehicle using ONLY parts from these two places. McMaster : Everything ever. Good for chain, sprockets, shafting, and some timing belts. Their wheel selection is also extensive, but many are caster-only. SDP-SI : Precision parts, timing belts and pulleys. B & B : Timing belts and pulleys. ElectricScooterParts : No further explanation needed. Wheels, both driven and undriven. Most everything you would otherwise want on a scooter. *NEW* Monster Scooter Parts: Like ESP, but larger selection in motors and wheels, as well as controllers. Fabrication Big Blue Saw : Waterjetting and laser cutting of most materials that exist. Shapeways : 3d printing. Random People's Websites The EVAlbum Shane The Jedboard Julian's hub motor build Amy's various small and stealthy vehicles Jerome Demers Jamison Go / Iron Man EVPlasmaMan scooter All my random scooters 35 Discussions This 4 year old instructable is STILL a treasure trove of useful relevant information. Thanks for sharing! What is the pinout for the standard, RC esc sensor connector? Wonderful instructable. Have been giving it some thought for a bit and wondering why you haven't considered building your own ESC. From your descriptions a sensorless controller suitable for our purposes seems very doable, but not available within the R/C airplane realm. We need a custom, self-starting, sensorless ESC. 
Others have made them. Once you have the basics together fine-tuning becomes possible. Seems like the perfect solution to a host of problems. Perhaps the springboard to designs otherwise out of reach. Thanks for your instructable. It's very interesting and complete but I have a, maybe, stupid question. I'm planning to put sensors in an outrunner as well and wanted to know how to tie the sensors to the ESC. There seems to be a standard 6 pin connector shared between the different ESC models. How do I wire the sensors to this connector? This is a pretty sweet little scooter, might make one for my son if he starts behaving again. lol I am legitimately trying to find that gas motor which you showed but did not use. Can you tell me where you found it? I can only find electric models, which are great but not appropriate for my current project. Thanks, -Grey I found a good place to get thick very flexible 8 and 4 gauge wire is the local car stereo center. The 8 gauge I used cost $1 per foot. They had 4 gauge but it was much more expensive. Wow, after reading this article now I'm building a scooter! If you run both you need a controller for each. Now this may sound easy but you need to be careful to set up each esc the same and use the exact same model of esc and motor or one motor will do more work. Now unless you want to push more than 200 lbs around at 20 mph or more one motor should be sufficient as long as you use the gearing equation from this article as to not burn up your motor. Good luck and feel welcome to ask any questions you want to. Hobbyking now sells 20C four cell 5 Ah lipo packs from the US warehouse for about $25 per pack: Could I maybe use a large pvc pipe to hold the hall effect sensors? How exactly do you back install hall effect sensors in a motor? I am trying to make an electric motorcycle and want to use a kelly controller. 
I have a Turnigy 80-100-A 180 KV brushless outrunner (the same class that is on the scooter in this instructable) and have had a really hard time figuring out how to install sensors. I am also having a hard time figuring out how this motor is wound. Any help would be greatly appreciated, thanks in advance. It is extremely difficult to install *internal* sensors onto a mystery motor (of which you do not know the internal winding pattern). The C80 series is also very difficult to discern because they require so much current. Thank you for the info, I have tried to run current through the different phases but I could not get enough current to get discernible results. I am thinking of just rewinding the motor so I know where the different phases are. Unfortunately I do not have access to anything to make an external mount for hall effect sensors. Do you know of a website that could make a mount for me? I also do not have any experience using CAD software. Another question I have is do all hall effect sensors have the same wiring scheme? The website I got mine from did not tell me which lead is which.
http://www.instructables.com/id/The-New-and-Improved-Brushless-Electric-Scooter-Po/
CC-MAIN-2018-30
refinedweb
22,122
60.24
12 July 2011 23:59 [Source: ICIS news] LONDON (ICIS)--European July epoxy resin contracts have settled €100/tonne ($141/tonne) below the June contract price because of falling feedstock costs, market sources said on Tuesday. This brings liquid epoxy resin (LER) prices down to €2,900-2,950/tonne FD (free delivered) NWE (northwest Europe). July solid epoxy resins (SER) have reduced at the same rate and now stand at €2,950-3,100/tonne. Market players are concerned about struggling southern European economies and a sluggish economic recovery Europe-wide. In addition, the European holiday season in August is also expected to slow demand. Currently, demand is healthy for both SER and LER, although signs of a slowdown are beginning to show, sources said. "The contract price reduction was due to declining feedstock bisphenol-A (BPA) and epichlorohydrin (ECH) costs and improving supplies." Raw material supplies in Europe have improved due to increasing BPA imports from "The extra BPA imports
http://www.icis.com/Articles/2011/07/12/9476928/europe-july-epoxy-resin-drops-100tonne-on-lower-feedstock-costs.html
CC-MAIN-2013-48
refinedweb
162
52.7