text
stringlengths
454
608k
url
stringlengths
17
896
dump
stringclasses
91 values
source
stringclasses
1 value
word_count
int64
101
114k
flesch_reading_ease
float64
50
104
On Mon, Sep 22, 2003 at 01:55:07PM -0400, chas williams wrote: > In message <20030922180749.A26432@xxxxxxxxxxxxx>,Christoph Hellwig writes: > >Umm, shouldn't you just ifdef out the whole function and it's > >assignment to the operations vector? This looks horribly ugly.. > > i suppose i am not convinced that two #ifdef/#endif's are better than an > #if/#else/#endif first ifdefs in a function are always worse than around functions, second ifdefs in a function that make it a stuv are pretty ugly and third your (void)arg crap is ugly as hell. (what compiler do you have that complains about this, btw, gcc 3.3 doesn't..). So if you prefer one if/else/endif you can do it as #if FOO foo_ioctl() { } #else #define foo_ioctl NULL #endif but in the case of assigning a function pointer to an operation vector I find this more confusing than the above variant. ---end quoted text---
http://oss.sgi.com/projects/netdev/archive/2003-09/msg01585.html
CC-MAIN-2014-15
refinedweb
156
76.56
I have a problem with my timer. I have set a timer variable to 70 and after this time delay a new ball should be created. It works, but after a certain time there are too many balls. So I intend that after 200 balls are created, all balls except one should be deleted and all starts from beginning on. I tried to solve the problem with a List that counts all the ball objects. But I don`t manage it .... Thanks a lot! import greenfoot.*; // (World, Actor, GreenfootImage, Greenfoot and MouseInfo) public class Balls extends Actor { private int intervall = 70; public void act() { this.move(4); this.changingDirection(); this.controllingTimer(); } public void changingDirection() { if(this.isAtEdge()) { this.turn(180); } } public void controllingTimer() { if(intervall > 1) { this.intervall--; } else { int x = Greenfoot.getRandomNumber(580); int y = Greenfoot.getRandomNumber (380); MyWorld mw = this.getWorldOfType(MyWorld.class); mw.addObject(new Balls(), x, y); intervall = 70; } } }
https://www.greenfoot.org/topics/58303/0
CC-MAIN-2017-30
refinedweb
153
61.73
compsub - Package for defining compilation subroutines package SomeThing; sub import { compsub::define( foo => sub { return $_[0] || B::OP->new('null', 0) } ); } ... package main; use Something; ... foo $a, $b = (1, 2); This module will create segmentation faults if you don't know how to use it properly. You are expected to be familiar with the perl internals to use this module properly.. Keywords for a compilation sub are lexically scoped, and can be declared for example by a use Module. Use the name of the compilation sub to call it. foo $arg1, $arg2, @args; The compilation sub is not affected by parenteses, i.e. foo($arg1, $arg2), @args @args is still passed to foo. The compilation sub is called after parsing the arguments, and the compile time setting changed and declarations made by the compilation sub do NOT affect the arguments. What the compilation sub exactly does (at compile and run-time) is up to the sub. Things it can do at compile time are for example: pre-process constant arguments, do some basic argument verfication, declare other compilation subs, inline calls. Things it can do at run-time are for example: call a subroutine with its normal arguments, do nothing, evaluate one of its arguments multiple times, call multiple subroutines with the same arguments. A compilation subs are lexical scope, and can be declared into the lexical scope currently being compiled using: compsub::define( foo => \&compfoo ) This defines a compilation subroutine foo, which gets compiled using compfoo. To declare the function at compile-time, you usually have to define it in a BEGIN block or in an import routine. When foo is used compfoo gets called with as argument the opcode for the list following foo. compfoo is expected to return an opcode, this opcode will be inserted in the place where foo is called. Opcodes are instances of B::OP or one of its subclasses. 
There is no verification of the "correctness" of the opcode tree generated, so you may easily created opcode trees which wrong and generate segfaults or manipulate random memory and stuff like that. See also B::OP about creating and maniuplating op trees. Opcodes discarded are not automaticly freed. The opcodes are freed normally with freeing of the sub. If you want to discard an opcode you have to explicitly call 'free' on it. This also applies to the opcode passed to the compsub, i.e. if it isn't used in the opcode tree returned by you, you should free the it. This example creates a compilation sub debuglog, which calls dolog if $ENV{DEBUG} is set. Thus checking for $ENV{DEBUG} is done at compile time, and if it not set no code is executed at run-time. sub dolog { print $log, @_; } # this subroutines compiles when $ENV{DEBUG} is set to calling 'dolog' with its arguments, # otherwise it will do nothing (including NOT evaluting its arguments). sub compdebuglog { my $op = shift; if ($ENV{DEBUG}) { my $cvop = B::SVOP->new('const', 0, *dolog); $op = B::LISTOP->new('list', 0, ($op ? ($op, $cvop) : ($cvop, undef))); return B::UNOP->new('entersub', B::OPf_STACKED|B::OPf_SPECIAL, $op); } else { $op && $op->free; # we don't use $op, so we must explicitly free it. return B::OP->new('null', 0); } } compsub::define( debuglog => \&compdebuglog ); ... debuglog Dump($complexvar); In this example a keyword params is created. This keyword expects a list of compile-time constant string arguments, and as last argument a hashref. It creates a lexical scope variable for each string argument. At run-time the lexical scoped variables set to the hash value with their name. 
# assumes argument like: 'foo' => \$foo, 'bar' => \$bar, { @_ } sub parseparams { my $values = pop @_; while (my $name = shift @_) { $_[0] = $values->{$name}; shift @_; } } # assumes argument like C<'foo', 'bar', { @_ }> # this will be converted like C<parseparams('foo', \(my $foo), 'bar', \(my $bar), { @_ })> sub compparams { my $op = shift; $op or return B::UNOP->new('null', 0); my $kid = $op->first; while (ref $kid ne "B::NULL") { if ($kid->name eq "const") { # allocate a 'my' variable my $targ = B::PAD::allocmy( '$' . ${ $kid->sv->object_2svref } ); # introduce the 'my' variable, and insert it into the list of argument. my $padsv = B::OP->new('padsv', B::OPf_MOD); $padsv->set_private(B::OPpLVAL_INTRO); $padsv->set_targ($targ); $padsv->set_sibling($kid->sibling); $kid->set_sibling($padsv); $kid = $padsv; } elsif ($kid->name eq "list" or $kid->name eq "pushmark") { # ignore } elsif ($kid->name eq "anonhash") { # ignore, assume it is the last item in the list. } else { die "Expected constant opcode but got " . $kid->name; } $kid = $kid->sibling; } my $cvop = B::SVOP->new('const', 0, *parseparams); $op = B::LISTOP->new('list', 0, ($op ? ($op, $cvop) : ($cvop, undef))); my $entersubop = B::UNOP->new('entersub', B::OPf_STACKED|B::OPf_SPECIAL, $op); return $entersubop; } BEGIN { compsub::define( params => \&compparams ) } { sub foobar { params 'foo', 'bar', { @_ }; is $foo, 'foo-value', '$foo declared and initialized'; is $bar, 'bar-value'; } foobar( foo => "foo-value", bar => "bar-value" ); } The hint hash %^H is used to define the lexical scoped keyword. And is used during tokenizing to find the subroutine. After it a listexpr is expected by the parser. After parsing the listexpr, ck_compsub calls the subroutine and returns the opcode return by the sub. Gerard Goossen <gerard@tty.nl>.
http://search.cpan.org/~tty/kurila-1.19_0/lib/compsub.pm
CC-MAIN-2015-11
refinedweb
873
53.51
Question: Do people practically ever use defensive getters/setters? To me, 99% of the time you intend for the object you set in another object to be a copy of the same object reference, and you intend for changes you make to it to also be made in the object it was set in. If you setDate ( Date dt ) and modify dt later, who cares? Unless I want some basic immutable data bean that just has primitives and maybe something simple like a Date, I never use it. As far as clone, there are issues as to how deep or shallow the copy is, so it seems kind of "dangerous" to know what is going to come out when you clone an Object. I think I have only used clone() once or twice, and that was to copy the current state of the object because another thread (ie another HTTP request accessing the same object in Session) could be modifying it. Edit - A comment I made below is more the question: But then again, you DID change the Date, so it's kind of your own fault, hence whole discussion of term "defensive". If it is all application code under your own control among a small to medium group of developers, will just documenting your classes suffice as an alternative to making object copies? Or is this not necessary, since you should always assume something ISN'T copied when calling a setter/getter? Solution:1 From Josh Bloch's Effective Java:. Item 24: Make defensive copies when needed Solution:2 This is a non-trivial question. Basically, you have to think about any internal state of a class that you give to any other class via getter or by calling another class' setter. 
For example, if you do this: Date now = new Date(); someObject.setDate(now); // another use of "now" that expects its value to not have changed then you potentially have two problems: someObjectcould potentially change the value of " now", meaning the method above when it later uses that variable could have a different value than it expected, and - if after passing " now" to someObjectyou change its value, and if someObjectdid not make a defensive copy, then you've changed the internal state of someObject. You should either protected against both cases, or you should document your expectation of what is allowed or disallowed, depending on who the client of the code is. Another case is when a class has a Map and you provide a getter for the Map itself. If the Map is part of the internal state of the object and the object expects to fully manage the contents of the Map, then you should never let the Map out. If you must provide a getter for a map, then return Collections.unmodifiableMap(myMap) instead of myMap. Here you probably do not want to make a clone or defensive copy due to the potential cost. By returning your Map wrapped so that it cannot be modified, you are protecting your internal state from being modified by another class. For many reasons, clone() is often not the right solution. Some better solutions are: - For getters: - Instead of returning a Map, return only Iterators to either the keySetor to the Map.Entryor to whatever allows client code to do what it needs to do. In other words, return something that is essentially a read-only view of your internal state, or - Return your mutable state object wrapped in an immutable wrapper similar to Collections.unmodifiableMap() - Rather than returning a Map, provide a getmethod that takes a key and returns the corresponding value from the map. If all clients will do with the Mapis get values out of it, then don't give clients the Mapitself; instead, provide a getter that wraps the Map's get()method. 
- For constructors: - Use copy constructors in your object constructors to make a copy of anything passed in that is mutable. - Design to take immutable quantities as constructor arguments when you can, rather than mutable quantities. Sometimes it makes sense to take the long returned by new Date().getTime(), for example, rather than a Dateobject. - Make as much of your state finalas possible, but remember that a finalobject can still be mutable and a finalarray can still be modified. In all cases, if there is a question about who "owns" mutable state, document it on the getters or setters or constructors. Document it somewhere. Here's a trivial example of bad code: import java.util.Date; public class Test { public static void main(String[] args) { Date now = new Date(); Thread t1 = new Thread(new MyRunnable(now, 500)); t1.start(); try { Thread.sleep(250); } catch (InterruptedException e) { } now.setTime(new Date().getTime()); // BAD! Mutating our Date! Thread t2 = new Thread(new MyRunnable(now, 500)); t2.start(); } static public class MyRunnable implements Runnable { private final Date date; private final int count; public MyRunnable(final Date date, final int count) { this.date = date; this.count = count; } public void run() { try { Thread.sleep(count); } catch (InterruptedException e) { } long time = new Date().getTime() - date.getTime(); System.out.println("Runtime = " + time); } } } You should see that each runnable sleeps for 500 msec, but instead you get the wrong time information. If you change the constructor to make a defensive copy: public MyRunnable(final Date date, final int count) { this.date = new Date(date.getTime()); this.count = count; } then you get the correct time information. This is a trivial example. You don't want to have to debug a complicated example. NOTE: A common result of failure to properly manage state is a ConcurrentModificationException when iterating over a collection. Should you code defensively? 
If you can guarantee that the same small team of expert programmers will always be the ones writing and maintaining your project, that they will continuously work on it so they retain memory of the details of the project, that the same people will work on it for the lifetime of the project, and that the project will never become "large," then perhaps you can get away with not doing so. But the cost to defensive programming is not large except in the rarest of cases -- and the benefit is large. Plus: defensive coding is a good habit. You don't want to encourage the development of bad habits of passing mutable data around to places that shouldn't have it. This will bite you some day. Of course, all of this depends on the required uptime of your project. Solution:3 For both of these issues, the point is explicit control of state. It may be that most of the time you can "get away" without thinking about these things. This tends to be less true as your application gets larger and it gets harder to reason about state and how it propagates among objects. You've already mentioned a major reason why you'd need to have control over this - being able to use the data safely while another thread was accessing it. It's also easy to make mistakes like this: class A { Map myMap; } class B { Map myMap; public B(A a) { myMap = A.getMap();//returns ref to A's myMap } public void process (){ // call this and you inadvertently destroy a ... do somethign destructive to the b.myMap... } } The point is not that you always want to clone, that would be dumb and expensive. The point is not to make blanket statements about when that sort of thing is appropriate. Solution:4 I've used Clone() to save object state in the user's session to allow for Undo during edits. I've also used it in unit tests. Solution:5 I can think of one situation where clone is much preferable to copy constructors. 
If you have a function which takes in an object of type X and then returns a modified copy of it, it may be preferable for that copy to be a clone if you want to retain the internal, non-X related information. For example, a function which increments a Date by 5 hours might be useful even if it was passed an object of type SpecialDate. That said, a lot of the time using composition instead of inheritance would avoid such concerns entirely. Solution:6 I don't like the clone() method, because there is always a type-cast needed. For this reason I use the copy-constructor most of the time. It states more clearly what it does (new object) and you have much control about how it behaves, or how deep the copy is. At my work we don't worry about defensive programming, although that is a bad habbit. But most of the time it goes ok, but I think I am going to give it a closer look. Solution:7 One thing I'm alwyas missing at a "defensive copy discussion" is the performance aspect. That aiscussion is IMHO a perfect example of performance vs readability/security/robustness. Defence copies are great for ropbust ode. But if you use it in a time critical part of your app it can be a major performance issue. We had this discussion recently where a data vector stored its data in a double[] values. getValues() returned values.clone(). In our algorithm, getValues() was called for a lot of different objects. When we were wondering, why this simple piece of code took so long to execute, we inspected the code - replaced the return values.clone() with return values and suddenly our total execution time was lowered to less than 1/10 of the original value. Well - I don't need to say that we chose to skip the defensiveness. Note: I'm not again defensive copies in general. But use your brain when clone()ing! Solution:8 I have started using the following practice: Create copy constructors in your classes but make them protected. 
The reason for this is that creating objects using the new operator can lead to various issues when working with derived objects. Create a Copyable interface as follows: public interface Copyable<T> { public T copy(); } Have the copy method of classes implementing Copyable call the protected copy constructor. Derived classes can then call super.Xxx(obj_to_copy); to leverage the base class copy constructor and adding additional functionality as required. The fact that Java supports covariant return type makes this work. Derived classes simply implement the copy() method as appropriate and return a type-safe value for their particular class. Note:If u also have question or solution just comment us below or mail us on toontricks1994@gmail.com EmoticonEmoticon
http://www.toontricks.com/2018/06/tutorial-is-clone-really-ever-used-what_1.html
CC-MAIN-2018-43
refinedweb
1,765
62.58
See also: IRC log <Steven> <Ryladog> Ryladog = Katie Haritos-Shea <scribe> ScribeNick: Lachy janina: the important thing is the intent: zoom, pan, etc. rather than click, key press. … so we have james craig from apple and rich from IBM who will dive into technical details janina: Rich is first rich: A few years ago we started with ARIA 1.0 … the original purpose was to make rich internet applications accessibile. … It put semantics into the DOM that the browser would then take and expose to the assistive technology … One thing out of scope was platform independents of devices. e.g. pull up your mobile device and you don't have a keyboard. … We're going to address this in Indie UI … Second thing is user context. … It's been external to the W3C, but has been worked on elsewhere for a few years … as we go to mobile where there's a definiate need for the web app to adapt to the users needs, how do you need content to be delivered at the time it's being processsed. james: The next bit is how we've split the deliveralbes … these are two independent sections, somewhat unrelated, but both needed. … the first is the events module. … Some background. … events currently notify when something has happened. Click event, focus event, DOM Mutation events. … These particular events are not intended to notify of a change that has already happened, but to notify an event that notifies the users request to control the application … One example in the current draft is a dismiss request. … e.g. On a page with a modal dialog, the page might dismiss the dialog by listening for the Esc key … It listens for different things on different platforms. … That doesn't work on devices without keyboards, or without the specific keys. … These events work in context where the device understands the intent in a device specific way, and so can convey that intent directly to the web page. … e.g. the user wants the next page of the results, or change the value of a control. 
… the de facto key press bits defined in the ARIA authoring guide are a stop gap measure … It relies on device specific events. … some of the more complex parts of ARIA only work if you have these keyboard events. … These events are intended to abstract out how the user controls the web app. … Then the second deliverable is the user context module. …part of this is to allow introspection into the user's needs. … A common request that has real privacy concerns is whether or not the user is using assistive technology. … we're aware of the privacy concerns, but some of the information is about which screen reader is being used, etc. … type of colours they prefer, preferred fonts, etc. … obviously, some of the things more related to disabilities need to have some way for the user to opt in … The other part proposed is additions to the nagivator object. <Steven> Some history - … e.g. An event has happened at a particular location of the screen where the keyboard focus is elsewhere. … Basically, the overview of these events are things that allow assistive technology and mainstream browsers to provide addtional control to the web app, and provide some introspection into the user's needs. … Rich wanted to mention some existing work. Rich; In the education space: Something called Access for All; APIP used for learning assessment. … Define a set of user needs, and have metadata about them … e.g. might have a need to have something captioned. … I have this video that is not captioned, but I have a related video that is, which could be a substitute. … it doesn't have the world wide adoption. … I may not have a fixed set of prefernces. e.g. one of the things we can't do today, if I go into a noisy room and all of a sudden my device could detect that the noise is so high, the web app should know to turn captions on. … I could have an HTML 5 video element. The device could adapt to the environment and turn on the captions for the user. 
… The user didn't have to do it manually … in terms of education, this would really help the education space as well. … Those are some things we're trying to do. … If we had other types of input, what are the use cases that assistive technology could do. james: e.g. The first of two examples I mentioned earlier was an ARIA slider. … The value change request could be done in a number of ways. … The other I mentioend was escapting from a dialog … Who is familiar with VoiceOver on iOS? … It is a screen reader … Because it is a touch screen, the screen reader intercepts the user's touch events. … Knowing the user's intent is part of the operating system and the assistive technology … So if a user did a gesture that indicates that they want to exit from this view, or speaks a command, as in the context of other assistive technologies like Dragon, the system could interpret that and send the appropriate event to the web app … the web app wouldn't have to be concerned with physical events. … One more example, some of the keypress events that were defacto defined as part of ARIA... (???) rich: Advantages for mainstream developers. say my android device has voice recognition commands. I can move to that platform without having to recode my web app to respond to those events. … I can have higher level commands that fire on different platforms … regardless of the specific device or platform in use james: I'd really like to address any confusion you may have? magnus: I was wondering about the context. If that's something could come from an external source, or is it always within the browser. … Could it be injected to the browser, from an automotive context, or other use. james: it's not just specific to the browser. e.g. the example I metioned for the screen reader or voice control. Those ATs run outside the context of the browser, but use the browser as a gateway. 
… The AT understands what the user wants to do and passes it to the browser, which in turn passes it to the web app. Janina: the bottom line is that it doesn't matter how you collect the data. james: specifically, how the OS or AT collects that data is out of scope. magnus: It's all an abstraction you impose on the browser. james: yes janina. yes, there is a way to inject that into the browser. james: System level APIs for communicating between the AT and the browser or device is out of scope. magnus: We have the web driver initiative where we have an a system to drive the browser itself, simulating a user raman: One way is coming at it with a set of high level events, and it's up to the implementation to decide how to map to those …. The other end of it is that there are a set of frameworks that allow the a system to inject a set of events, (simulating a user) … But at the end of the day, the author hasn't captured in the application is that what he's actually interested in is what the user is trying to do. james: So we did anticipate in the short term, the most accessible web apps are relying on a series of keyboard focus events or press events to respond to the user … Web apps would want to respond to both those events and the indie ui events in the short term, but transition to more of these events in the long term. markus: In terms of the user profile and the fields within it, how do you handle... … There's potentially an infinite number of preferences. … How do you handle the tension between the obvious ease of a finite set of fields vs. the need for extensibility james: One of the things is the ability for a web ap to make a js call on the navigator object to obtain a user preference. … we want to keep the common events defined in the specifciation, but make it possible to incorporate the larger taxonomies from the groups that rich mentioned earlier. … The specifcs are yet to be worked out. … maybe a namespaced key. Might allow for vendor specific keys. 
… Safari has a check box to change the way, e.g. a tab works. That would probably need a vendor type prefix. … The idea is to incorporate any preference, while defining the common ones. … Certain preferences have privacy implications. … The UA might ask the user to grant permission. rich: this would be great for epub too. gz: The two topics of indiie ui are events and context. One a high overview, there are two strateies. … For input, we want to take the burdon off the author and let them code in abstract ways. For output, we want to load the burden on the author and let them query the user's needs and tailior the output. rich: yes and no. There could be services to do this for them, but at the end of the day, it's up to the application. james: For input, we want to abstract as much as possible. … Take the burdon off the author. … But there are a variet of documented reasons where we can't know all of the details. … In cases where we know there is a gap, we want to provide a way for authors to get that information. gz: In the responsive images session, one of the things we discussed was can the author forsee the needs ot the user. There, we said, no probably not, becasue the author doesn't forsee all possibilites. … media queries was ruled out there because the author dictates the outcome of the decision. Whereas if we postpone the decision to run time, we have higher flexibility. … Especially in accessibility, if we rely on the author, we are lost. janina: I don't think we're changing the requirements on authoring. … YouTube videos mostly won't have captions. … But education content, medical content, etc will become more accessible. … The content will be driven as in the past with socaial action. … What we're talking about is taking the burden off the user and letting the device and the web app suit the user's need. james: Setting preferences in the user style sheet, such as colour, never works out well because the author doesn't know about it. 
… Font size is one that's pretty easy to adapt to. … you can easily adapt to this by specifying units relative to the font size. … e.g. Changing the background and foreground colour. If the web app knew the user wanted high contrast, it could adapt to that. … we want to provide that flexibility for the ones that need it. janina: That flexibility is a curb cut. You don't need a disability to benefit. The machine could adjust the contrast based on the user's environment. rich: When you bring in an epub publication, is some of the injected javascript put into the page by the tool that allows you to flexibily change things? … You have epub document, a player that loads it, can the user inject javascript markus: no. … Author provided only. james: But a publisher that's publishing lots of epubs could deelop something for this. Magnus: is there any plans to have some kind of mapping between these events and physical events. james: Not really. That's mostly platform dependent. ... ATs may change the way those key presses work. rich: We could publish a note that was per-platform. james: An informative listing of suggestions could be published. 
RRSAgent: make minutes RRSAgent: make public RRSAgent: make logs public This is scribe.perl Revision: 1.137 of Date: 2012/09/20 20:19:01 Check for newer version at Guessing input format: RRSAgent_Text_Format (score 1.00) Succeeded: s/ST/St/ Succeeded: s/or speaks a command,/or speaks a command, as in the context of other assistive technologies like Dragon,/ Succeeded: s/Aria is a stop gap measure/the de facto key press bits defined in the ARIA authoring guide are a stop gap measure/ Succeeded: s/ARIA only works if you have/some of the more complex parts of ARIA only work if you have/ Succeeded: s/The two examples I mentioned earlier were aria sliders./The first of two examples I mentioned earlier was an ARIA slider./ Succeeded: s/voice over/VoiceOver/ Found ScribeNick: Lachy Inferring Scribes: Lachy Present: Steven_Pemberton TVRaman Katie_Haritos-Shea Gottfried_Zimmermann Lachy ddahl Sylvie David_MacD_Lenovo smaug Javi Gottfried richardschwerdtfeger Judy jcraig Ryladog cris Steven bjkim MichaelC_ ethan_ shepazu hober Got date from IRC log name: 31 Oct 2012 Guessing minutes URL: People with action items: WARNING: Input appears to use implicit continuation lines. You may need the "-implicitContinuations" option.[End of scribe.perl diagnostic output]
http://www.w3.org/2012/10/31-indie-ui-minutes
CC-MAIN-2014-10
refinedweb
2,182
72.05
In a previous notebook, I showed an example implementation of an artificial neural network (ANN) using only numpy. I took this approach because I really wanted to dig into the guts of how ANNs work to understand what they're doing on a fundamental level. The architecture of the example network I used was very simple: a single input layer for two features, two hidden layers with 5 hidden units each, and an output layer with two units corresponding to output classes. Each layer was fully-connected to the next one (these are known as "dense" layers), which showed how things can get complex fairly quickly. However, ANNs really shine in deep learning, where the number of layers far exceeds the number in my previous example. It's no surprise then that a number of libraries have recently popped up for working with ANNs in Python. Several libraries exist in the Python ecosystem for dealing with deep learning / ANNs in various capacities such as Theano, TensorFlow, Keras, Lasagne, NoLearn, and Blocks. These libraries however are not all mutually exclusive nor do they serve to fill the same niches. You can choose your favorite based on how dirty you like to get your hands. In this notebook I will re-implement our example network using Keras. Keras is a framework for building ANNs that sits on top of either a Theano or TensorFlow backend. I like the way Keras works because I find it allows you to focus on the network structure, and you can get a model up and training very quickly. The Keras Blog has some great examples of how to use the framework. It is common to use GPUs to accelerate the training of neural networks as the underlying computations lend themselves well to parallelization. If you want to use the GPU on your local machine for training neural networks, it may take some work to get things up and running. My current setup is a 2013 MacBook Pro running Arch Linux. The machine has a NVIDIA GeForce GT 650M and I've set up Theano to use the device. 
This requires installing CUDA and a compatible version of gcc as suggested by the GPGPU page on the Arch Wiki. I have tried to use Theano with a GPU on a Windows 8 machine, but had no success after investing a large amount of time. TL;DR: If you want to use your local GPU to train neural networks, you're probably best off running Linux. Recall the structure of our example network from the previous notebook, where $s_l$ represents a layer of neurons in the network: Also recall that we used a sigmoid activation function, and our cost function included a regularization term. We'll keep this in mind as we build our network. The Keras API is based around layers that are organized into models (see 30 seconds to Keras for a quick overview). Let's import the classes we will need. import numpy as np np.random.seed(0) from sklearn import datasets import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' from keras.models import Sequential from keras.layers import Dense from keras.optimizers import SGD Using Theano backend. Using gpu device 0: GeForce GT 650M (CNMeM is disabled, cuDNN 5005) Next, we'll generate the same dataset we used in our previous notebook. X, y = datasets.make_moons(n_samples=1000, noise=0.1, random_state=0) colors = ['steelblue' if label == 1 else 'darkred' for label in y] plt.scatter(X[:,0], X[:,1], color=colors) y.shape, X.shape ((1000,), (1000, 2)) We first instantiate a model object, and then add layers to it with their respective activation functions and weight initializations. Keras has different activation functions built in such as 'sigmoid', 'tanh', 'softmax', and many others. Also built in are different weight initialization options. The choice of activation and initialization can be specified using kwargs when adding a layer to the model. Our example network uses a sigmoid activation function and initializes the weights between $[-\epsilon, \epsilon]$. 
Where $\epsilon$ is defined as: This initialization method corresponds to the 'glorot_uniform' initialization option in Keras. # Define our model object model = Sequential() # kwarg dict for convenience layer_kw = dict(activation='sigmoid', init='glorot_uniform') # Add layers to our model model.add(Dense(output_dim=5, input_shape=(2, ), **layer_kw)) model.add(Dense(output_dim=5, **layer_kw)) model.add(Dense(output_dim=1, **layer_kw)) In Keras, you must also specify an optimizer to use when training your model. In our previous example we used batch gradient descent where we used our entire training set for each training iteration. Our data set wasn't particularly large so this was a feasible option, however in practice stochastic gradient descent is used. This is when the model is trained on smaller, random batches of the training set. This is also usually coupled with a smaller learning rate. Here we will define a stochastic gradient descent optimizer with a learning rate of 0.01 ($\alpha$, which we set to 0.5 in our previous example), which should behave similarly. In Keras, a stochastic gradient descent optimizer object is SGD. sgd = SGD(lr=0.1) Finally we must choose a loss function and "compile" our model. We will use a cross-entropy loss function corresponding to the loss in our previous example). In Keras-speak "compiling" essentially configures your model to work with your optimizer and loss function. You can specify the optimizer and loss function in kwargs using specific strings or by passing specific objects. We're using an optimizer with a custom learning rate with the vanilla cross-entropy loss function. model.compile(optimizer=sgd, loss='binary_crossentropy') We can now train our model using the model.fit() method. Usually this method prints a nice text-based indicator of the training progress. 
We'll turn this off ( verbose=0) since we're going to use a large number of iterations ( nb_epoch=4000, greater than our last example due to the smaller $\alpha$). We can also easily capture information about the training process in the History object returned by model.fit(). For good measure we'll also pass the shuffle=True so that our training data batches are shuffled before each pass. history = model.fit(X[:500], y[:500], verbose=0, nb_epoch=4000, shuffle=True) Once trained, we can have a look at our loss function over each epoch, as found in history.history['loss']. If all goes well, this should be monotonically decreasing, and close to zero. plt.plot(history.history['loss']) [<matplotlib.lines.Line2D at 0x7f30f8063b00>] Interesting...this looks a lot like the loss during traning of our previous implementation. Here we'll bring back our plot_decision_boundary function and have it use a divergent color map. def plot_decision_boundary(X, y, model, steps=1000, cmap='Paired'): """ Function to plot the decision boundary and data points of a model. Data points are colored based on their actual label. """ cmap = plt.get_cmap(cmap) # = model.predict(np.c_[xx.ravel(), yy.ravel()]) # Plot decision boundary in region of interest z = labels.reshape(xx.shape) fig, ax = plt.subplots() ax.contourf(xx, yy, z, cmap=cmap, alpha=0.5) # Get predicted labels on training data and plot train_labels = model.predict(X) ax.scatter(X[:,0], X[:,1], c=y, cmap=cmap, lw=0) return fig, ax plot_decision_boundary(X, y, model, cmap='RdBu') (<matplotlib.figure.Figure at 0x7f313e9b6160>, <matplotlib.axes._subplots.AxesSubplot at 0x7f31003d6d68>) We can see in this post that using a library dedicated to working with ANNs like Keras makes it much easier to get a model up and running. We recreated our example ANN using only 7 lines of Keras, and we could easily choose our activation function, weight initialization, optimizer, and loss function. 
Check out the Keras blog for some other examples of this library in action. If you're feeling particularly ambitious and have your GPU set up to work with Keras, you can try identifying nerve structures in ultrasound images on Kaggle using this convolutional ANN put together by Marko Jocić. Don't be intimidated, it's not as hard to run as it sounds!
https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
CC-MAIN-2020-29
refinedweb
1,350
56.15
Search... FAQs Subscribe Pie FAQs Recent topics Flagged topics Hot topics Best topics Search... Search within Java in General: Java in General Hibernate, Spring and H2 Seamus Minogue Ranch Hand Posts: 41 posted 12 years ago Number of slices to send: Optional 'thank-you' note: Send First off if this is the wrong place... sorry. I didnt think it really fit in the Hibernate area of the forum. I am writing test cases for DAO's in a project. Rather than simulate the database access I really am going to a database. My application will eventually be running on a MySql database, but I dont want to use that for testing. So at present when my test cases run spring calls derby which creates a new database. By using Hibernate's HBM2DDL.auto property I am creating my table structure and everything works fine happiness, right? So my problem is (other than derby creating this annoying directory in my project for the database) derby seems very slow. At present my project is executing 81 tests of the DAO's in just over 8.5 seconds. Now I can live with 8.5 seconds delay in the build. The problem is I am only testing about 33% of the DAO. So I expect that number to probably triple. Again, 25 seconds delay during build is something I can live with. I am thinking down the road when I start testing other things like the service layer of my application (which trickles down to the DAO and DB) that number is going to go up. I really dont want to end up having a build which takes several minutes to execute! So I have been looking at switching from derby (file based) to H2Database which according to their website (they wouldn't stretch the truth would they?) is the fastest in memory DB solution out there. The problem I am running into is that when I execute my tests I get an error that my schema does not exist and my tests fail. Has anyone gotten this to work? If not has anyone done something similar and can tell me what in memory DB they used and how? 
Thanks Seamus Minogue Ranch Hand Posts: 41 posted 12 years ago Number of slices to send: Optional 'thank-you' note: Send Ok so I worked this out and thought I would share with all those who have suffered as I have suffered. applicationContext.xml: <bean id="dataSource" class="org.apache.commons.dbcp.BasicDataSource" destroy- <property name="driverClassName" value="org.h2.Driver" /> <property name="url" value="jdbc:h2:mem:joshua" /> <property name="username"><value>sa</value></property> <property name="password"><value></value></property> </bean> <!-- Hibernate SessionFactory --> <bean id="sessionFactory" class="org.springframework.orm.hibernate3.LocalSessionFactoryBean"> <property name="dataSource"> <ref local="dataSource" /> </property> <property name="hibernateProperties"> <props> <prop key="hibernate.dialect"> org.hibernate.dialect.H2Dialect </prop> <prop key="hibernate.show_sql">false</prop> <prop key="hibernate.connection.provider_class">org.hibernate.connection.C3P0ConnectionProvider</prop> <prop key="hibernate.hbm2ddl.auto">create</prop> </props> </property> <property name="mappingResources"> <list> <value> Games.hbm.xml </value> </list> </property> </bean> <bean id="transactionManager" class="org.springframework.orm.hibernate3.HibernateTransactionManager"> <property name="sessionFactory"> <ref local="sessionFactory" /> </property> </bean> <bean id="hibernateTemplate" class="org.springframework.orm.hibernate3.HibernateTemplate"> <property name="sessionFactory"> <ref bean="sessionFactory" /> </property> </bean> <bean id="GamesDao" class="GamesDaoImpl"> <property name="hibernateTemplate"> <ref bean="hibernateTemplate" /> </property> </bean> Games.hbm.xml: <?xml version="1.0" encoding="utf-8"?> <!DOCTYPE hibernate-mapping PUBLIC "-//Hibernate/Hibernate Mapping DTD 3.0//EN" ""> <hibernate-mapping> <class name="Games" table="games"> <id name="id" type="java.lang.Integer"> <column name="id" /> <generator class="native" /> </id> <property name="name" 
type="java.lang.String"> <column name="name" length="100" not- </property> <property name="description" type="java.lang.String"> <column name="description" length="1000" /> </property> </class> </hibernate-mapping> JUnit Test Class: public class GamesDaoTests extends AbstractTransactionalDataSourceSpringContextTests { GamesDao gamesDao; public void setGamesDao(GamesDao gamesDao) { this.gamesDao = gamesDao; } protected String[] getConfigLocations() { return new String[] { "classpath:applicationContextTest.xml"}; } public void testSomething(){ ... } The key seemed to be removing the catelog attribute of class tag in the hibernate-mapping file. I dont really know why this broke things... But removing it fixed the problem. I dont know if its required when the application actually connects to my real database rather than this one in memory. This is part of a utility jar file and the mappings will be external to it when I actually deploy. Anyway, my 81 tests went from like 8.5 seconds to execute down to 3.106 seconds. BIG improvement :-) Hope this helps someone. -Seamus You showed up just in time for the waffles! And this tiny ad: Building a Better World in your Backyard by Paul Wheaton and Shawn Klassen-Koop reply reply Bookmark Topic Watch Topic New Topic Boost this thread! Similar Threads S2: Action/DAO mess... JTA Transaction database code testing DAO implementation questions Looking for advice on configuring a new project using Spring and Hibernate. More...
https://www.coderanch.com/t/428894/java/Hibernate-Spring
CC-MAIN-2021-43
refinedweb
827
50.12
Azure Explorer: Cause for Cerebration.Published 26 April 2013 9:53 am Azure Storage Account to SQL Server 2012 SP1 CU2. In order to try this out, I decided that I needed an Azure Storage account, and a means to access, check and maintain my backups. It was harder process than I hoped but fortunately, at the point of frustration, my pains eased considerably when I laid hands on a copy of a new (and free) Azure Explorer tool, from Cerebrata. Armed with decent bandwidth, Azure Storage seemed a good general option for offsite backup storage and an convenient way to store files, scripts and data if you are moving about a lot. You can store any data in Azure Storage and access it via ordinary HTTP or HTTPS protocol. This data can be either private or public but you can’t choose to make individual files public or private, only Azure containers and their contents. First things first, I needed to set up a Windows Azure Storage Account. Setting up the Windows Azure Storage Account The storage account provides the namespace for accessing data. An account can store up to 100TB, and can contain an unlimited number of containers. Each container can store an unlimited number of blobs. Each block blob can store up to 200GB of data. Page blobs can be up to 1TB in size, and are better for frequently updated data. Once you’ve signed up for an Azure Storage account, which is a slightly awkward process, you need to find your way to the Windows Azure Management Portal. Now, whatever you do, if you have an MSDN trial account, don’t do what I did and create a SQL Azure Database with the default settings. It means that when your subscription reaches its spending limit, you are locked out for a month. Yes, this system isn’t exactly friendly. Firstly, log into the Portal. Then, at the bottom of the navigation pane, click NEW. Then click on DATA SERVICES | STORAGE | QUICK CREATE. 
You’ll see something like this: In the URL field, you’ll need to type a subdomain name to use in the URL for the storage account. The entry can contain from 3-24 lowercase letters and numbers. Lowercase, remember. This isn’t Windows but Unix. Within the URL, the subdomain name becomes the host name used in order to address the Blob, Queue, or Table resources for the subscription. It is essentially the name of the account. You’ll need to choose a Region/Affinity Group in which to locate the storage. Click on the geo-location tick-box if you’d like to spread your bet regarding the data across regions. All you need now is to get the access keys, which Windows Azure generates and uses for authentication when accessing the storage account. Through some twisted logic, to get these, you have to click on a letter ‘i’, next to the delete button. Fortunately, the designers allowed a ‘Manage Access Keys’ label next to the ‘i’. (Odd choice of name. "what shall we call these keys. Let’s name it after one of our most famous products!") Managing Data and Backups in Azure Data Storage Having completed this stage, I got busy downloading the tools I needed to manage my data and backups, copy files locally, perform restores and so on. This entailed forty minutes of downloading and installing an SDK, a PowerShell pack, and VS2012 add-in pack, followed by a reboot. About halfway through, I had my first twinge of doubt. What on earth would I want to do all this for? I’m not thinking of building an application. I’m a user. . This is primitive stuff. My frustration grew as I searched in vain for an easy way to manage my files. All I want is a way of treating an Azure Storage account as if it were a file system, with Azure Containers as the root directories. I know it isn’t like that really, but let’s pretend it is remote file system, since all I want to do is to securely store or retrieve files and maintain a hierarchical directory in which to store them. 
I just want to drag and drop, or copy ‘n’ paste, files between my Azure Storage file system and my local machine, or from one Azure ‘directory’ to another. Sure, there are plenty of other things I’d like to do with them, and it is frustrating to have to spend time doing things in Azure that can be done simply with a mouse-click in a Windows domain. I hadn’t a clue what I wanted specifically from an Azure tool: something maybe like Skydrive or Dropbox, but for real Azure Storage. I just knew that I didn’t want to engage in a huge cultural shift just to manage my Azure storage. I popped over to see Cerebrata to ask them what they recommended. They listened patiently as I poured out my frustration, but then looked smug and gave me a copy of Azure Explorer (Yup, gave me). Azure Explorer Within Azure Explorer, you can create a new Azure Storage Account, equivalent to a drive, simply by giving the name of your account and the key. You can test it before you commit. That’s it. The only hassle is on Microsoft’s side of the fence setting up the account. Azure Explorer looks just like Windows Explorer, and one can work the two applications together. You can drag and drop files between them or copy n’ paste. You can load or run files directly from Azure. Within a couple of minutes, I was using it like an old friend. It transformed my experience of working with Azure. In its design, it is so close to Windows Explorer that there is no point in giving you a screen dump. It even allows you to access your local files in much the same way as you’re used to. It doesn’t have all the features of Windows Explorer, and I wouldn’t want them. It provides the essentials without all the frilly stuff. Thankfully, they use the list view, not the ‘home’ thumbnail images. It works fine, and you soon forget that you’re getting data from Azure storage. 
I gather from what they told me that Azure Explorer started out life as a test-bed to implement ‘explorer’-like features into Cerebrata Azure Studio, but it took on a life of its own. Sure, all the technology behind it is going into the other Cerebrata tools, but after a lot of heart-searching, they decided to give it away on their website free as well. I like ‘free’, especially a proper professionally written tool that is full of goodness. A lot of work has gone into this tool. A great deal of the smoothness of the tool has come from the fact that they clearly listened to the suggestions of their many beta-testers. To access a file directly via scripting, you right-click, click on ‘copy path’ and you can get hold of the URL. I can use PowerShell to do… $(New-Object Net.WebClient).DownloadFile(‘<TheURLICopied>’,‘<DestinationPathAndFilename’) If you make the container ‘public’, this is available to anyone. Obviously, Azure is very different from real file storage, but Azure Explorer keeps these differences in the background. In Azure storage, files equate to Block BLOBs but you can create Page BLOBs too, in Azure Explorer, if you click on the drop down under upload in the ribbon bar and select a menu item for uploading page blobs. One other difference that can present you with slight puzzlement is that the paths in Azure blob storage just denote the categorization that comes to you when you name the blob. Containers don’t contain directories. Azure Explorer masks that cleverly from the user whilst conforming to the way in which Azure arranges it. Within Azure Explorer, the container is the root directory, containing a series of files (blobs). When using Azure storage for backups, you can ensure that the backups go into the required directory, just by specifying the path, just as you always have, except you don’t have to check that all the intervening directories in the path you specify already exist. 
It doesn’t matter if another application uploads the backup, because the directories will appear in Azure Explorer anyway. The only time you might come unstuck is when you create directories with nothing in them. They’d be volatile if you exited Azure Explorer before creating files in them. Here’s how to get the free copy of Azure Explorer. Thanks for a great post! ‘copy path’ is very useful.
https://www.simple-talk.com/blogs/2013/04/26/azure-explorer-cause-for-cerebration/
CC-MAIN-2016-26
refinedweb
1,458
71.34
DEBSOURCES Skip Quicknav sources / glibc / 2.28-8 / posix / sys / wait <>. */ /* * POSIX Standard: 3.2.1 Wait for Process Termination <sys/wait.h> */ #ifndef _SYS_WAIT_H #define _SYS_WAIT_H 1 #include <features.h> __BEGIN_DECLS #include <bits/types.h> #ifndef __pid_t_defined typedef __pid_t pid_t; # define __pid_t_defined #endif #if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8 # include <signal.h> #endif #if defined __USE_XOPEN_EXTENDED && !defined __USE_XOPEN2K8 /* Some older standards require the contents of struct rusage to be defined here. */ # include <bits/types/struct_rusage.h> #endif /* These macros could also be defined in <stdlib.h>. */ #if !defined _STDLIB_H || (!defined __USE_XOPEN && !defined __USE_XOPEN2K8) /* This will define the `W*' macros for the flag bits to `waitpid', `wait3', and `wait4'. */ # include <bits/waitflags.h> /* This will define all the `__W*' macros. */ # include <bits/waitstatus.h> # define WEXITSTATUS(status) __WEXITSTATUS (status) # define WTERMSIG(status) __WTERMSIG (status) # define WSTOPSIG(status) __WSTOPSIG (status) # define WIFEXITED(status) __WIFEXITED (status) # define WIFSIGNALED(status) __WIFSIGNALED (status) # define WIFSTOPPED(status) __WIFSTOPPED (status) # ifdef __WIFCONTINUED # define WIFCONTINUED(status) __WIFCONTINUED (status) # endif #endif /* <stdlib.h> not included. */ #ifdef __USE_MISC # define WCOREFLAG __WCOREFLAG # define WCOREDUMP(status) __WCOREDUMP (status) # define W_EXITCODE(ret, sig) __W_EXITCODE (ret, sig) # define W_STOPCODE(sig) __W_STOPCODE (sig) #endif /* Wait for a child to die. When one does, put its status in *STAT_LOC and return its process ID. For errors, return (pid_t) -1. This function is a cancellation point and therefore not marked with __THROW. */ extern __pid_t wait (int *__stat_loc); #ifdef __USE_MISC /* Special values for the PID argument to `waitpid' and `wait4'. */ # define WAIT_ANY (-1) /* Any process. */ # define WAIT_MYPGRP 0 /* Any process in my process group. 
*/ #endif /* Wait for a child matching PID to die. If PID is greater than 0, match any process whose process ID is PID. If PID is (pid_t) -1, match any process. If PID is (pid_t) 0, match any process with the same process group as the current process. If PID is less than -1, match any process whose process group is the absolute value of PID. If the WNOHANG bit is set in OPTIONS, and that child is not already dead, return (pid_t) 0. If successful, return PID and store the dead child's status in STAT_LOC. Return (pid_t) -1 for errors. If the WUNTRACED bit is set in OPTIONS, return status for stopped children; otherwise don't. This function is a cancellation point and therefore not marked with __THROW. */ extern __pid_t waitpid (__pid_t __pid, int *__stat_loc, int __options); #if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8 # ifndef __id_t_defined typedef __id_t id_t; # define __id_t_defined # endif # include <bits/types/siginfo_t.h> /* Wait for a childing matching IDTYPE and ID to change the status and place appropriate information in *INFOP. If IDTYPE is P_PID, match any process whose process ID is ID. If IDTYPE is P_PGID, match any process whose process group is ID. If IDTYPE is P_ALL, match any process. If the WNOHANG bit is set in OPTIONS, and that child is not already dead, clear *INFOP and return 0. If successful, store exit code and status in *INFOP. This function is a cancellation point and therefore not marked with __THROW. */ extern int waitid (idtype_t __idtype, __id_t __id, siginfo_t *__infop, int __options); #endif #if defined __USE_MISC \ || (defined __USE_XOPEN_EXTENDED && !defined __USE_XOPEN2K) /* This being here makes the prototypes valid whether or not we have already included <sys/resource.h> to define `struct rusage'. */ struct rusage; /* Wait for a child to exit. When one does, put its status in *STAT_LOC and return its process ID. For errors return (pid_t) -1. 
If USAGE is not nil, store information about the child's resource usage there. If the WUNTRACED bit is set in OPTIONS, return status for stopped children; otherwise don't. */ extern __pid_t wait3 (int *__stat_loc, int __options, struct rusage * __usage) __THROWNL; #endif #ifdef __USE_MISC /* PID is like waitpid. Other args are like wait3. */ extern __pid_t wait4 (__pid_t __pid, int *__stat_loc, int __options, struct rusage *__usage) __THROWNL; #endif /* Use misc. */ __END_DECLS #endif /* sys/wait.h */
https://sources.debian.org/src/glibc/2.28-8/posix/sys/wait.h/
CC-MAIN-2019-39
refinedweb
643
67.96
1) overview of Generics and its usage in different context with samples. To start with, it illustrates the need for Generics and the difficulties faced by the Developers before its origin. It will explain in detail on how to write Generic Classes, Generic Methods and so on. Then the various aspects of Bound Constraints and Wild-cards will be discussed. also read: Java Generics In this section let us explore the basis of Generics programming. As mentioned in the introductory section, Generics programming enables Classes and Methods to operate on well defined parametric types allowing clients to substitute a suitable Java type at the compile time. This prevents un-necessary casting being done in the Application code and to prevent any wrong data-type being used. To make things clear, consider the following statements. Map contacts = new HashMap(); contacts.put(new Long(9912345678L), "Jenny"); contacts.put(new Long(9912345679L), "Johny"); Set contactValues = contacts.entrySet(); Iterator contactIterator = contactValues.iterator(); while (contactIterator.hasNext()) { Map.Entry anEntry = (Map.Entry)contactIterator.next(); // Line A Long number = (Long)anEntry.getKey(); // Line B String name = (String)anEntry.getValue(); // Line C System.out.println(number + ":" + name); } The above code populates a Map, keyed with mobile phone for a person name. The next pieces of code try to iterate over the Map, thereby printing the data within it. Look at the casts down at lines A, B and C. Even though we know that we are going to add phone numbers (which is of type Long) and a person name (probably a String) into the Map, we are doing an explicit cast to get the appropriate data. The other problem we find here is, what if the client put some other data other than the Long data-type for the Key. contacts.put(new String("9912345678L"), "Jenny"); The above code will definitely raise an exception at the run-time. 
So, we find two major dis-advantages in the older code (code compiled with java 1.4 compiler or before). One is the need to have absurd cast that is being spread across the code. The other thing is that there is no procedural mechanism through which we can prevent wrong data-type being added to the above Collection. The solution to the above problems is having Generics in the programming code. Let us see how the above code is re-written using Generics. Map contacts = new HashMap(); contacts.put(new Long(9912345678L), "Jenny"); contacts.put(new Long(9912345679L), "Johny"); Set<Map.Entry> contactValues = contacts.entrySet(); Iterator<Map.Entry> contactIterator = contactValues.iterator(); while (contactIterator.hasNext()) { Map.Entry anEntry = contactIterator.next(); Long number = anEntry.getKey(); String name = anEntry.getValue(); System.out.println(number + ":" + name); } Let us analyze what has happened in the above code. The declaration Map contacts = new HashMap(); has now changed to Map contacts = new HashMap(); The former is called a Raw Map and the latter is an example of a Generic Map. If we look at the declaration of the Map and the HashMap classes, we will find something similar to the following, public interface Map { ... } The above declaration can be interpreted as : Map has two parametric types called K (meaning Key) and V (meaning Value). The name of the parametric types can be anything, that doesn’t matter. These changes to Map and all its related collection classes is there right from Java 5.0. So, whenever a clients references a Map interface, it cannot plainly do like the following, Map mapObject = new HashMap(); The compiler (Java 5.0) as soon as encountering this statement will issue a warning telling that, the declaration of Map is a raw-type and its references should be parameterized. 
Since the declaration of the Map interface is now parameterized with Keys and Values in the form of , the client referencing the Map should provide a suitable type for the parametric types. Going back to our code, Map contacts = new HashMap(); We want the key for the Map to be of type Long and the value for the corresponding Key to be of type String. We also have parameterized the HashMap class with Long and String, since the class declaration for HashMap has also changed. public class HashMap implements Map { … } The usage of parametric types has not only affected Map and HashMap but all the collection related classes and interfaces in the java.util package. In our code, contacts are made to populate in the map object by calling the Map.put() method. Now, the call to Map.entrySet() will return a Set containing entries which is of type Map.Entry. Now let us have a look over the Map.entrySet() method, Set<Map.Entry> entrySet(); The return type of Map.entrySet() is a Set which is parameterized with Map.Entry. Map.Entry is a class that will store an entry, which is nothing but a combination of Key and value. Note that Map.Entry is again parameterized with K (for Key) and V (for Value). In our case, the key is of type Long and the value is of type String, we have something like the following in the later part of the code, Set<Map.Entry> contactValues = contacts.entrySet(); The same thing applies for Iterator which is parameterized with Map.Entry which is again parameterized with Long and String. While traversing over the elements within the while loop, we have statements like the following, ... Map.Entry anEntry = contactIterator.next(); Long number = anEntry.getKey(); String name = anEntry.getValue(); ... Since we well know that the Iterator is typed with Map.Entry, there is no need for an explicit type-cast. Same is the case for Map.getKey() and Map.getValue(). 
Since the Map has been parameterized with Long and String, it is not possible to add types other than the defined types. So now the following code will raise a compilation error. contacts.put(new String(""), new Long(1L)); This ensures type-safe programming and it prevents the client code from adding any wrong data-type to the Collection. 3) Writing Generic Classes Let us see how to create our own classes using Generics. Let us keep the purpose of the class as simple as holding some Object, the Object Holder class. This Object Holder class can hold any type of Java object. It provides method for getting and setting the current Object. The following shows the class declaration of the Object Holder class. ObjectHolder.java package generics.classes; public class ObjectHolder { private O anyObject; public O getObject() { return anyObject; } public void setObject(O anyObject) { this.anyObject = anyObject; } public String toString() { return anyObject.toString(); } } Note the following syntax, public class ObjectHolder The above statement essentially says that we wish to make the Object Holder class as a Generic Class. Technically, Object Holder is now a parametric class and O is called a type parameter. O serves as a place-holder for holding any type of Object. Note the usage of the type parameter within the class declaration. Now, the clients can use the above class by substituting any kind of object for the place-holder O. Consider the following Client program that makes use of the Object Holder class. 
ObjectHolderClient.java package generics.classes; import java.net.URL; public class ObjectHolderClient { public static void main(String[] args) throws Exception { ObjectHolder stringHolder = new ObjectHolder(); stringHolder.setObject(new String("String")); System.out.println(stringHolder.toString()); ObjectHolder urlHolder = new ObjectHolder(); urlHolder.setObject(new URL("")); System.out.println(urlHolder.toString()); } } Note how the Clients instantiates an instance for the Object Holder class. ObjectHolder stringHolder = new ObjectHolder(); This is called type substitution. For the type parameter O, the type String is substituted. And now the calls to ObjectHolder.setObject(O anyObject) and O ObjectHolder.getObject() can be imagined as ObjectHolder.setObject(String anyObject) and String ObjectHolder.getObject(). Now, let us see another example of Generic classes having two or more parametric types. Assume that we want to represent a class that holds a Composite object along with the elements of the composite object. We can see this kind of Composiste – children relation-ship in a number of places. For example, a Folder containing multiple files or a Window containing a number of UI Components and so on. Following is the representation of the Container With Elements ( ContainerWithElements) class holding a Container object with its children. Note that the class typed parameters have the names Container and Elements respectively for holding the Container object and its child elements. 
ContainerWithElements.java package generics.classes; import java.util.List; public class ContainerWithElements { private Container outerObject; private List innerObjects; public ContainerWithElements(Container outerObject, List innerObjects) { this.outerObject = outerObject; this.innerObjects = innerObjects; } public Container getOuterObject() { return outerObject; } public void setOuterObject(Container outerObject) { this.outerObject = outerObject; } public List getInnerObjects() { return innerObjects; } public void setInnerObjects(List innerObjects) { this.innerObjects = innerObjects; } } The Client program that makes use of the above class is given below. Note the usage of the classes Folder, File, Window and Button are from the generics.classes package and they have nothing to do with the genuine java.* classes. ContainerWithElementsClient.java package generics.classes; import java.util.Arrays; public class ContainerWithElementsClient { public static void main(String[] args) { ContainerWithElements folderWithFiles = new ContainerWithElements( new Folder(), Arrays.asList(new File(), new File())); ContainerWithElements windowWithButtons = new ContainerWithElements( new Window(), Arrays.asList(new Button(), new Button())); } } For code completeness, the declaration of the classes Folder, File, Window and Button is given below. class Folder{} class File{} class Window{} class Button{} 4) Writing Generic Methods In the previous section, we saw how to write Parameterized Classes. Now, let us spend time in exercising Parameterized Methods or Generic methods in this section. A Generic class containing a type parameter affects the entire class, but a generic method containing one or more type parameters affects that particular method only. So it means that a non-generic class can contain a mixture of generic and non-generic methods. Following code snippet shows how to declare a Generic method. 
GenericMethods.java package generics.methods; public class GenericMethods { static void printType(T anyType) { System.out.println(anyType.getClass().getName()); } public static void main(String[] args) { GenericMethods.printType(String.class); GenericMethods.printType(new String("")); } } If we look at the way in which a Generic method is declared, we find that the static method printType() has a return type void and it takes a single parameter called T. Here T stands for any parametric type which can be substituted with any of the Java types by the Clients. Since we have introduced a parameter T, it should be defined. But where? It should be defined in the method definition itself just before the return type of the method (). The moral is whenever we have different type parameters in a method, it should be defined in the method definition. For example, consider the following method that has two type parameters A and B and they are defined before the return type of the method separated by commas. void aGgenericMethod(A aType, B bType) { // Something here. } But the same type parameter can de used multiple number of times in the parameter list. For example, the type paramter A is defined once but used multiple times in the following code, void aGgenericMethod(A aType, A anotherType, B bType) { // Something here. } 5) Wildcards The following sections explain the usage of Wild-card character in Generics. For example, consider the following, strObjects.add(new String("A String")); strObjects.add(new String("Another String")); The above line declares an Array List with a type being the String type. The declaration says that the list ( strObjects) in this case, can hold objects only of type java.lang.String. No other type is permitted other than java.lang.String. So, the following statements are legal as they are adding objects of type java.lang.String. 
List anotherStrObjects = strObjects; Now, consider the following assignment statement which assigns the object reference strObjects to anotherStrObjects. Now anotherStrObjects is pointing to strObjects reference. List The above is a perfectly legal statement as the assignment happens between the same type. Now, consider the following, LObject someObject = new String(""); The above statement will lead to a compiler error telling that “Cannot convert from List to List<Object”. Though String is a sub-class of the Object class, this type of conversion is not possible in Generics, but the following code compiles well. List objects = strObjects; The reason why it is not possible to assign the List containing strings to a List that can hold Objects can be given justification as follows. The main goal of having Generic code in a program is to ensure type-safety. It means that the expected Application types and the types sent by the Clients should match. There should not be any deviation or mis-match in their types. If we go back to the code, on the left-hand side, we have a List that can hold Objects represented by List objects. This tells to the compiler that this List can hold objects of type java.lang.Object. Now we are making this object List to point to some other list whose parametric type is String. Though String is a concrete sub-class of Object, this assignment is not possible. Let us see how to get over this problem. Consider the following code, Object someObject = new String(""); The character '?' is a wild-card character and it stands for any Java type. It can be java.lang.Object type or some other type. It is just a place-holder that tells that it can be assigned with any type. Considering this case, the following are now valid syntaxes. 
List anyObjects = null; List integers = new ArrayList(); anyObjects = integers; List doubles = new ArrayList(); anyObjects = doubles; The above code snippet tries to assign a list of integers and a list of doubles to the reference anyObjects. This is perfectly legal. Another strange thing has to be considered is while adding elements into the collection. Since the type parameter for the reference anyObjects is '?', no objects can be added to the collection, not even java.lang.Object. The only exception is that only null can be added to these type of collection. anyObjects.add(new Integer(1)); // Wont compile anyObjects.add(new Double(1.0)); // Wont compile anyObjects.add(null); // This will compile. 6) Bounding the Parameterized Types Generics won’t be complete if this section is not covered. It is all about bounding parametric types. Till now, we have seen parametric types operate on a single java type like Object or String. Now, let us see how the parametric types can be restricted by applying some constraints over them. For this to be illustrated, let us take a sample application called Animal Actions. Animal Actions class performs various operations on a given Animal like: making them eat, sleep and run. The first constraint that we see here is that only an Animal object can be passed to the Animal Actions class. Let us represent the Animal class as follows. Animal.java package generics.bounds; public abstract class Animal { // Some common functionalities here. } Note that the Animal class is declared as abstract, meaning that some other concrete class is going to extend this Animal class. Another restriction that we see in Animal Actions class is that they will make the Animals to sleep, eat and run. Since these are behaviors and we may give different representations for the same, let them be modeled as interfaces. 
Following code shows the interface design for the behaviors, package generics.bounds; interface Sleepable { public void sleep(); } interface Runnable { public void run(); } interface Eatable { public void eat(); } Let us give implementation of the above behaviors for some animal, say Dog. The following code snippet is for the implementation of Dog which conforms to eating, sleeping and running behavior. Dog.java package generics.bounds; public class Dog extends Animal implements Sleepable, Runnable, Eatable { public void sleep() { System.out.println("Dog is sleeping"); } public void run() { System.out.println("Dog is running"); } public void eat() { System.out.println("Dog is eating"); } Now, let us design the Animal Actions class. The restriction we have on Animal Actions class is that, we should operation on any type of object that is an Animal which can eat, sleep and run. Look into the following Animal Actions class, AnimalActions.java package generics.bounds; public class AnimalActions<a> { private A animal; public AnimalActions(A animal) { this.animal = animal; } public A getAnimal() { return animal; } public void setAnimal(A animal) { this.animal = animal; } public void doActions() { animal.sleep(); animal.run(); animal.eat(); } }</a> The declaration of the parameterized class looks like the following, public class AnimalActions Let us break down the pieces in the above declaration. The first trivial stuff that has to be noted is the declaration of the typed parameter A (which is for Animal). The next set of expressions are imposing restrictions on the typed parameter A. The phrase 'A extends Animal' tells that whatever type we pass for the substitution parameter must extend/implement the Animal class/interface. The type that comes after extends can either be an interface or a class. 
It is illegal to mention something like the following, public class AnimalActions Only extends keyword is used for both class as well the interface and not the implements keyword. If we want the parametric type to confirm by more than one classes or interfaces, then every types should be separated by the symbol &. For example, in our case, we want some Animal to eat, sleep and run by implementing the Eatable, Sleepable and Runnable interface. So, we have declared something like AnimalActions<A extends Animal & Sleepable & Runnable & Eatable. To sum up things, the generic class declaration can be interpreted like this; it can be passed with any type that implements/extends Animal, Sleepable, Runnable and Eatable types. Following code snippet makes use of the above Animal Actions class. In the below code, a new instance of Dog object is created and passed on to the constructor of the Animal Actions class. This is perfectly valid as the Dog class extends the Animal class and it also implements Sleepable, Eatable and Runnable interfaces. It then makes a call to AnimalActions.doActions(), thereby the execution gets directed towards Dog.sleep(), Dog.eat() and Dot.run(). AnimalActionsTest.java package generics.bounds; public class AnimalActionsTest { public static void main(String[] args) { AnimalActions animal = new AnimalActions(new Dog()); animal.doActions(); } } 7) More on parametric bounds and wild-cards The restriction on parametric types that is applied on class definition is also applicable to method definition. Let us move towards assignment now. Consider the following statement, List animals = new ArrayList(); The above is a declaration of list that essentially tells to the compiler that it can hold Animal objects. So the following statements is perfectly valid. animals.add(new Animal()); // Fine. Assuming that Animal class is not abstract. Not only it is possible to add Animal objects but also any types that extends the Animal class. 
By having this rule in hand, it is perfectly possible to have the following statements, animals.add(new Dog()); // This will work too. animals.add(new Cat()); // This also. Even though, it is possible to add any type that extends the Animal class, it is not possible to have the following statement. List dogs = new ArrayList(); dogs.add(new Dog()); dogs.add(new Dog()); animals = dogs; // This wont compile. The compiler will warn you telling that it is not possible to convert List to List. Even a type-casting on the above statement doesn’t work. animals = (List<Animal>)dogs; // This won't work. Since type-casting from one type to another type is a run-time operation and during run-time there is no such existence of the types List or List because of erasures. All the typed-parameters won’t be available in the class-file and the above code doesn’t work. 7.1) Upper Bound The solution to this situation is the usage of wild-cards along with parametric bounding. Have a look over the following declaration. List animals = new ArrayList(); It tells that a list is being declared with type being anything (?) that is extending the Animal class. Though it looks very similar to the above declaration it has some differences. The first thing is that it is now possible for the animals reference to point to a list that is holding any sub-type of Animal objects. List dogs = new ArrayList(); dogs.add(new Dog()); dogs.add(new Dog()); animals = dogs; One important difference is that, it is not possible to add elements to the animals list, though the only exception is adding null elements. This is called the Upper Bound for the animals list. 7.2) Lower Bound Similar to Upper Bounds, we have Lower Bounds with the following syntax, List dogs = new ArrayList(); This declaration tells that along with Dog type, all other sub-types of Dog is also allowed. Not any of the super-type of Dog can be added including java.lang.Object. 
So, if we have classes like GoodDog and BadDog, both extending the Dog class, then the following statements are legal. dogs.add(new Dog()); dogs.add(new GoodDog()); dogs.add(new BadDog()); 8) Conclusion One of the major language and syntax change in Java 5.0 is Java Generics. Through Generics, it is now possible to have a programming model that operates on some parametric type and the same model can be re-used with different types. It is important to note that there will be only one class file for the whole Generic type, unlike other programming languages (in the name of templates in C++). Generics makes use of the concept of Erasure which erases all the Generic type information during the compilation process. your explanation is very good.i learned some thing Thank you for reading my blog!!
http://www.javabeat.net/generics-in-java-5-0/
CC-MAIN-2015-14
refinedweb
3,628
58.38
I have a text file from amazon, containing the following info: # user item time rating review text (the header is added by me for explanation, not in the text file disjiad123 TYh23hs9 13160032 5 I love this phone as it is easy to use hjf2329ccc TGjsk123 14423321 3 Suck restaurant pd.read_csv(filename, sep = " ", header = None, names = ["user","item","time","rating", "review"], usecols = ["user", "item", "rating"])#I'd like to skip the text review part ValueError: Passed header names mismatches usecols pd.read_csv(filename, sep = " ", header = None) Error tokenizing data. C error: Expected 229 fields in line 3, saw 320 # review text user item time rating (the header is added by me for explanation, not in the text file I love this phone as it is easy to used isjiad123 TYh23hs9 13160032 5 Suck restaurant hjf2329ccc TGjsk123 14423321 3 As suggested, DictReader could also be used as follows to create a list of rows. This could then be imported as a frame in pandas: import pandas as pd import csv rows = [] csv_header = ['user', 'item', 'time', 'rating', 'review'] frame_header = ['user', 'item', 'rating', 'review'] with open('input.csv', 'rb') as f_input: for row in csv.DictReader(f_input, delimiter=' ', fieldnames=csv_header[:-1], restkey=csv_header[-1], skipinitialspace=True): try: rows.append([row['user'], row['item'], row['rating'], ' '.join(row['review'])]) except KeyError, e: rows.append([row['user'], row['item'], row['rating'], ' ']) frame = pd.DataFrame(rows, columns=frame_header) print frame This would display the following: user item rating review 0 disjiad123 TYh23hs9 5 I love this phone as it is easy to use 1 hjf2329ccc TGjsk123 3 Suck restaurant
https://codedump.io/share/jlrPEa6oDLvU/1/how-to-read-the-csv-file-properly-if-each-row-contains-different-number-of-filedsnumber-quite-big
CC-MAIN-2016-44
refinedweb
264
50.16
Groovy supports closures and they are very useful when we create Groovy applications. For example we can pass closures as arguments to methods to execute them. We can create closures ourselves, but we can also convert a method to a closure with the .& operator. And we can use the converted method just like a normal closure. Because Groovy can use Java objects we can also convert a Java method into a closure. Let's start with a simple Java class: public class JavaObject { public static void javaSays(final String s) { System.out.println("Java says: Hello " + s + "!"); } } With the following script we use this Java class and convert the javaSays method to a closure: // Simple list with names. def names = ['groovy', 'grails', 'mrhaki'] // Simple closure. names.each { println 'Normal closure says: Hello ' + it + '!' } // Groovy method to convert to closure. def groovySays(s) { "Groovy says: Hello ${s}!" } // Use .& syntax to convert method to closure. names.each(this.&groovySays) // Convert Java method to closure and use it. def javaSays = JavaObject.&javaSays names.each javaSays If we run this script we get the following output: Normal closure says: Hello groovy! Normal closure says: Hello grails! Normal closure says: Hello mrhaki! Groovy says: Hello groovy! Groovy says: Hello grails! Groovy says: Hello mrhaki! Java says: Hello groovy! Java says: Hello grails! Java says: Hello mrhaki!
http://mrhaki.blogspot.com/2009/08/groovy-goodness-turn-methods-into.html
CC-MAIN-2017-34
refinedweb
221
71.1
In this recipe, we will create a custom container with a title and content. When pressing the title, the content will collapse or expand. We will explore the layout animation API for this example, as it will help us to accomplish our goal in a few steps only. Let's start by creating a new app, using the React Native CLI. We will call it CollapsibleApp, but feel free to use any other name. Once we have created the app, let's create two files at the root of the project, src/MainApp.js and src/Panel/index.js. These are the only two files we will be working on. index.ios.jsfile; remove the existing code and add the following to bootstrap the app: import ... No credit card required
https://www.oreilly.com/library/view/react-native-cookbook/9781786462558/ch03s05.html
CC-MAIN-2019-30
refinedweb
130
76.11
Issues ZF-4117: Zend_Soap_AutoDiscover is using hardcoded http schema Description Zend_Soap_AutoDiscover is using Zend_Uri::factory('http://' ... to create a services uri - it should at least be able to correctly discover https. Kind regards, Thomas Gelf Posted by Thomas Gelf (tgelf) on 2008-08-29T07:41:59.000+0000 You can find a possible way to fix this issue in the attached patch file. Cheers, Thomas Posted by Marcus Welz (lucidix) on 2008-08-29T10:19:29.000+0000 Would it make sense to allow for more flexibility with setting the Uri? For cases of mod_rewrite, in context of MVC, etc. Patch attached. Thanks! Posted by Thomas Gelf (tgelf) on 2008-08-29T16:16:22.000+0000 Full ack, absolutely great! Posted by Apaella (apaella) on 2008-09-03T03:30:10.000+0000 I ran in the same problem. Same solution :D Posted by Marcus Welz (lucidix) on 2008-09-12T13:27:30.000+0000 Zend_Soap_AutoDiscover.targetNamespace.patch allows for setting the soap endpoint url separately from the target namespace. Posted by Thomas Gelf (tgelf) on 2008-09-16T06:25:14.000+0000 Like for Ing. Jitka Darbujanova in ZF-4172 this issue is a showstopper for me, Zend_Soap_AutoDiscover disallows HTTPS and setting uri / location / whatever on a personal choice is impossible. The patches attached to this bug report are easy to understand and shouldn't break anything - it would be great if someone could take care of this ticket. Issue is still there in 1.6.1. Best regards, Thomas Gelf Posted by Thomas Gelf (tgelf) on 2008-09-16T12:27:32.000+0000 Oops! I somehow managed it to toggle assignment, sorry. I'll let it be reassigned automatically... Posted by Benjamin Eberlei (beberlei) on 2008-10-07T00:47:12.000+0000 Sorry, all three patches are bogus.. The first one is just a hack to solve the HTTPS problem, the second and third ones ignore the setFunction alltogether. None provides unittests and documentation changes. 
Also the one with setUri (Zend_Soap_Autodiscover.uri.php) has the problem that when setClass is called BEFORE setUri, the uri setting is ignored. This is not desired behaviour since its common expectation that the setter functions of webservice objects (like SOAPServer) only begin to act on calling $server->handle(); I have attached a patch that allows changing the default wsdl location uri only by constructor, to handle the problem described in the above paragraph. I have added a unittest to check for the changed webservice url and a proposed change to the documentation. To allow for a setUri function to be accessible for the public scope, underlying changes to the WSDL component have to be done. Therefore my setUri function is just private for the time being, hoping that the WSDL component will be refactored someday to be more flexible. Posted by Benjamin Eberlei (beberlei) on 2008-10-07T00:48:50.000+0000 Posted by Benjamin Eberlei (beberlei) on 2008-10-07T00:50:05.000+0000 The corresponding file to this comments are AutoDiscoverTestSetUri.php.patch and AutoDiscoverSetUri.php.patch Posted by Benjamin Eberlei (beberlei) on 2008-10-26T11:14:29.000+0000 Fixed in Trunk via second constructor parameter $uri or function setUri($uri). Posted by Thomas Gelf (tgelf) on 2008-10-27T05:58:04.000+0000 Thank you for fixing this, setUri() is a great addition. However my initial bug report regarded hardcoded http schema and autodetection. My proposed patch is not a hack, schema detection is done the same way in other parts of the Zend Framework (Controller, OpenID...). And it requires no new documentation as it fixes existing behaviour conforming to what a developer would expect from an "autodiscover" component. Not using HTTPS is not an option for most webservices today, and if there is an autodetection component it should also detect wheter I'm running HTTPS or not. 
I do NOT want to have to either do schema detection in my controllers and I do NOT want to hardcode / configure URIs in my application. Schema detection should work out of the box - and adding port detection for non-standard ports would also be a nice-to-have feature. Adding schema detection from my very first patch to your setUri() function would solve this, adding port detection (like Zend_OpenId::selfUrl()) would be a nice addition. Best regards, Thomas Gelf NB: Sorry for impudently reopening this issue ;-) Posted by Benjamin Eberlei (beberlei) on 2008-10-27T08:51:07.000+0000 i am sorry, misunderstood the issue (overstepped it). I'll fix the HTTPS issue right away. Posted by Benjamin Eberlei (beberlei) on 2008-10-27T09:05:14.000+0000 Fixed detection of HTTPS schema in trunk Posted by Wil Sinclair (wil) on 2008-11-13T14:10:08.000+0000 Changing issues in preparation for the 1.7.0 release.
http://framework.zend.com/issues/browse/ZF-4117?focusedCommentId=24510&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel
CC-MAIN-2015-11
refinedweb
798
58.18
Hello Everybody, I am working on a very simple algorithm and I only want it to live trade once per day right before closing (like a daily backtest works) (I may want to make the trades at the beginning of the day depending on the results). I want to do this because the funds are leveraged ETFs so I lose money if I trade more than once per day. The only problem is that I don't know what I am doing wrong with the schedule_function. I keep getting the error: Line 4: Error: SyntaxError: pass the non-keyword argument before the keyword argument. Here is the code: def initialize(context): context.stocks = [sid(32272), sid(39214), sid(32269), sid(39215), sid(32270), sid(38533), sid(32271), sid(39216), sid(33209), sid(33208), sid(39217), sid(38564)] schedule_function(func=handle_data, date_rules.every_day(), time_rules.market_close(hours=0, minutes=1), half_days=True) def handle_data(context, data): for stock in context.stocks: if stock in data: order_target_percent(stock,0.08333333333) Thanks for all of the help, Nick V
https://www.quantopian.com/posts/schedule-function-problem
CC-MAIN-2018-13
refinedweb
175
63.49
5.1.The while Loop A while loop will check a condition and then continues to execute a block of code as long as the condition evaluates to a boolean value of true. Its syntax is as follows: while () { }. The statements can be any valid C# statements. The boolean expression is evaluated before any code in the following block has executed. When the boolean expression evaluates to true, the statements will execute and recheck the boolean expression again and again until the boolean expression false, the statements will stopped. The While Loop: WhileLoop.cs using System; class WhileLoop { public static void Main() { int index = 1; while (index <= 10) { Console.Write("{0} ", index); index++; } Console.ReadKey(); } } Example while loop Repetition Statements
https://www.onlinetrainingzone.org/c-2/?section=the-while-loop
CC-MAIN-2020-29
refinedweb
120
67.04
This pandemic has taken a huge toll on my mental and emotional health. In order to keep me occupied and brighten up the lives of those around me, I started on yet another Python project — this time, a WhatsApp bot that sends me random cat pictures, trending memes, the best cooking recipes, and of course, the latest world news and COVID19 statistics. The full project can be found on my GitHub repository, and my webhook is live on. Prerequisites We will be using Python, the Django web framework, ngrok and Twilio to create this chatbot. I will show you how to install the required packages, but you need to have Python (3.6 or newer) and a smartphone with an active phone number and WhatsApp installed. Following Python best practices, we will create a virtual environment for our project, and install the required packages. First, create the project directory. $ mkdir whatsapp-bot $ cd whatsapp-bot Now, create a virtual environment and install the required packages. For macOS and Unix systems: $ python3 -m venv whatsapp-bot-venv $ source whatsapp-bot-venv/bin/activate (whatsapp-bot-venv) $ pip install twilio django requests For Windows: $ python3 -m venv whatsapp-bot-venv $ whatsapp-bot-venv\Scripts\activate (whatsapp-bot-venv) $ pip install twilio django requests Configuring Twilio You will need a free Twilio account, which allows you to use a Twilio number as your WhatsApp bot. A free account comes with a trial balance that will be enough to send and receive messages for weeks to come. If you wish to continue using your bot after your trial balance is up, you can top up your account. You won’t be able to use your own number unless you obtain permission from WhatsApp, but the Twilio number would be good enough for this project. You will need to set up your Twilio sandbox here by sending a WhatsApp message to the Twilio number. This has to be done once and only once. Setting Up Your Webhook Twilio uses what is called a webhook to communicate with our application. 
Our chatbot application would need to define an endpoint to be configured as this webhook so that Twilio can communicate with our application. Django is a web framework that allows us to do just that. Although the Django vs. Flask debate can go on for eternity, I chose to use Django simply because I have just started using it a few weeks ago and I wanted to get used to using it. You can use Flask to achieve the same thing, but the code would be different. First, navigate to your whatsapp-bot directory and establish a Django project. (whatsapp-bot-venv) $ django-admin startproject bot This will auto-generate some files for your project skeleton: bot/ manage.py bot/ __init__.py settings.py urls.py asgi.py wsgi.py Now, navigate to the directory you just created (make sure you are in the same directory as manage.py) and create your app directory. (whatsapp-bot-venv) $ python manage.py startapp bot_app This will create the following: bot_app/ __init__.py admin.py apps.py migrations/ __init__.py models.py tests.py views.py For the sake of this chatbot alone, we won’t need most of these files. They will only be relevant if you decide to expand your project into a full website. What we need to do is to define a webhook for Twilio. Your views.py file processes HTTP requests and responses for your web application. Twilio will send a POST request to your specified URL, which will map to a view function, which will return a response to Twilio. from twilio.twiml.messaging_response import MessagingResponse from django.views.decorators.csrf import csrf_exempt @csrf_exempt def index(request): if request.method == 'POST': # retrieve incoming message from POST request in lowercase incoming_msg = request.POST['Body'].lower() # create Twilio XML response resp = MessagingResponse() msg = resp.message() This creates an index view, which will process the Twilio POST requests. 
We retrieve the message sent by the user to the chatbot and turn it into lowercase so that we do not need to worry about whether the user capitalizes his message. Twilio expects a TwiML (an XML-based language) response from our webhook. MessagingResponse() creates a response object for this purpose. resp = MessagingResponse() msg = resp.message() msg.body('My Response') msg.media('') Doing this would create a response consisting of both text and media. Note that the media has to be in the form of a URL, and must be publicly accessible. from twilio.twiml.messaging_response import MessagingResponse from django.views.decorators.csrf import csrf_exempt from django.http import HttpResponse @csrf_exempt def index(request): if request.method == 'POST': # retrieve incoming message from POST request in lowercase incoming_msg = request.POST['Body'].lower() # create Twilio XML response resp = MessagingResponse() msg = resp.message() if incoming_msg == 'hello': response = "*Hi! I am the Quarantine Bot*" msg.body(response) return HttpResponse(str(resp)) With this knowledge, we can now return a HttpResponse that tells Twilio to send the message “Hi! I am the Quarantine Bot” back to the user. The asterisks (*) are for text formatting — WhatsApp will bold our message. This won’t work unless we link it to a URL. In the bot_app directory, create a file urls.py. Include the following code: from django.urls import path from . import views urlpatterns = [ path('', views.index, name='index'), ] Now, we need the root URLconf to point to our bot_app/urls.py. In bot/urls.py, add the following code: from django.contrib import admin from django.urls import include, path urlpatterns = [ path('bot/', include('bot_app.urls')), path('admin/', admin.site.urls), ] The include() function allows referencing other URLconfs. Whenever Django encounters include(), it chops off whatever part of the URL matched up to that point and sends the remaining string to the included URLconf for further processing. 
When Twilio sends a POST request to bot/, it will reference bot_app.urls, which references views.index, where the request will be processed. Testing It Works Make sure you are in the directory with manage.py, and run (whatsapp-bot-venv) $ python manage.py runserver You should see the port that your Django application is running on. In this screenshot, it is port 8000. But this is still running from your computer. To make this service reachable from the Internet we need to use ngrok. Open a second terminal window, and run $ ngrok http 8000 The lines beginning with forwarding tell you the public URL ngrok uses to redirect requests to your computer. In this screenshot, is redirecting requests to my computer on port 8000. Copy this URL, and go back to your Twilio console. Paste the URL into the “When a message comes in” field. Set the request method to HTTP post. If you want to use my app, use instead for the “When a message comes in” field. In your settings.py, you also need to add your ngrok URL as one of the ALLOWED_HOSTS. Now you can start sending messages to the chatbot from your smartphone that you connected to the sandbox at the start of this tutorial. Try sending ‘hello’. Adding Third-Party APIs In order to accomplish most of our features, we have to use third-party APIs. For instance, I used Dog CEO’s Dog API to get a random dog image every time the user sends the word ‘dog’. import requests ... elif incoming_msg == 'dog': # return a dog pic r = requests.get('') data = r.json() msg.media(data['message']) ... requests.get(url) sends a GET request to the specified URL and returns the response. Since the response is in JSON, we can use r.json() to convert it into a Python dictionary. We then use msg.media() to add the dog picture to the response. 
My Final Code My final chatbot includes the following features: - Random cat image - Random dog image - Random motivational quote - Fetching recipes from Allrecipes - Latest world news from various sources - Latest COVID19 statistics for each country - Trending memes from r/memes subreddit Deploying Your Webhook Once you’re done coding, you probably want to deploy your webhook somewhere so that it runs 24/7. PythonAnywhere provides a free Django-friendly hosting service. In order to deploy your project on PythonAnywhere, upload your project to GitHub and follow the documentation to set up your web app. Once deployed, update your Twilio sandbox configuration so that the webhook is set to your new URL (such as). Thanks for Reading! Now you know how to set up your very own WhatsApp chatbot with Twilio. If you have any questions, please feel free to comment on this post. Posted on by: Zhang Zeyu Simple is better than complex. Complex is better than complicated. Discussion This is a really nice post! I tried to implement my own version of it, but have run into errors while sending requests to Twilio. The logs say that my ngrok URL is returning a 404. I've done exactly what you have and thus this is very annoying. Do you have any ideas as to what the issue may be? Hi there, thanks for reading :) Can you check whether the URL you set in the Twilio sandbox configuration includes the /bot/ at the end? In our urls.py, we have configured the application on the /bot/ URL. So if the nrgok URL is c21d2af6.ngrok.io, you would have to use c21d2af6.ngrok.io/bot/. Alternatively, you could change this by editing this line in urls.py to Let me know if this helps! Hi, thanks for the quick response. It still doesn't work, unfortunately. The URLs change every time the server is started, yes? I tried to play around with that and added /bot/ at the end, but it still doesn't seem to work for me. /bot/ is added only in the Twilio config, yes? Or does that go in the python file too. 
It throws an error rn, but I just want to be sure. Yup, the ngrok URLs change every time you start ngrok, and /bot/ is added only in the Twilio config. Have you added the ngrok URL to your ALLOWED_HOSTSin settings.py? Is Python raising any errors or is it just a 404? If it's just a 404, you might want to check your urls.pyfiles in both the bot and the bot_app folder. The source code is available at my GitHub, so you can check against that. Perfect, I'll compare my code against your github repo and then let you know if something's up. Thanks! Hi Zhang Zeyu, Congrats! Great tutorial, I am learning a lot. I have an error: "requests.exceptions.ConnectionError: HTTPSConnectionPool(host='api.apify', port=443): Max retries exceeded with url:..." I think it is related to a restriction about apify.com. Please could you tell how to solve this? Could you suggest a tutorial to start with Apify? Thanks, nice job! Greetings from Ecuador! Nice post! It is really informative! Thank you! Hey! Nice post! It's very complete I have only one question: Is it normal to get a screen like in the pictures that I am linking with this comment? It's because I don´t know if it is normal to have the server running correctly and getting no response in my smartphone when I type "hello" but in the ngrok I got an "OK" and in the console I got "POST / HTTP/1.1" 200 16351 I let the links from my screenshots showing this. Thank you dev-to-uploads.s3.amazonaws.com/i/... dev-to-uploads.s3.amazonaws.com/i/... Nice post! :) isn't twilio too expensive for whatsapp API? Thank you! Perhaps, but I think it's good enough for a fun personal side project without having to register for a WhatsApp business account :) Thanks Zeyu, my bot is working! I did it!
https://dev.to/zeyu2001/i-built-a-python-whatsapp-bot-to-keep-me-sane-during-quarantine-nph
CC-MAIN-2020-40
refinedweb
1,978
67.45
EasyFix: Fix Error 500 when sorting users list by fullname. Review Request #10155 — Created Sept. 21, 2018 and discarded When appending ?sort=fullname to /users/, ReviewBoard attempts to query and sort on 'fullname' which is not a correct field and causes an Error 500 to occur. Although most manual sort queries will be discarded as improper, fullname is allowed as a sort field because it corresponds to a Column name on the page. This has been fixed by detecting the special case for 'fullname' in grids.py and if 'fullname' is in the sort query, it is removed and replaced with 'first_name' and 'last_name' which are its composite substrings. Tested with Review Board 3.0.x by manually attempting ?sort=fullname and ?sort=-fullname on /users/, as well as sorting by fullname and username, and going back and forth between the fullname sort cases and username sort cases. I encountered no issues on my testing once the fix was applied. Hey Jeff, I re-opened some of Barret's issues because those newlines still look like they're in the latest revision. Change Summary: Remove unnecessary linebreaks. Checks run (1 failed, 1 succeeded) flake8 Can you update your description to explain what the bug was and how your change fixes it? See for our guide on this. You should also list the bug number in the "Bugs" field, and not in the description. I just noticed what file this is in. The DataGridclass should be entirely generic--it's easily possible that someone could use this for a model that does include a column named fullname. Can we instead make the change in Review Board's reviewboard.datagrids.grids.UsersDataGrid? We can add a precomute_objectsoverride that first makes necessary changes to self.sort_listand then calls the superclass' version. David touched upon an important point, the location of where the change should be made. This change is too broad, as DataGrids are used for many things. Another thing that's important is the design of the change. 
This change is somewhat of a "bandaid." It's addressing a symptom of the problem, but the real problem is that we were trying to allow sorting on the fullname field (which does not have sortable=True, so it shouldn't even be possible), when it's not really a field. It's working around an improper setting. Albeit in an interesting way -- turning it into two sortable fields. So let's boil down the real issues: - We're allowing values in ?sort=that are not valid sortable columns. This needs to be fixed (and have a unit test proving it). - We want to allow sorting on the Full Name header, but we want something more specialized with the sorting (sorting by first name and then last name). Issue #1 -- Bad Sort Values The fix would be to add some validation where we're processing self.request.GET.get('sort', ...)in grids.py. It needs to be updated to make sure that each value in there is something we can sort on. Each must be something that a column could return. So an approach would be to first loop through all columns being shown on the datagrid that have sortable=Trueand then fetch the sortable database field from each, put those all in a set(), and then ignore any value in ?sort=that aren't in that set. This fixes not only ?sort=fullname, but ?sort=whatever. Issue #2 -- Smarter Sorting of Full Names Let's make a proper design for this, rather than working around a limitation in the current design. Columns can specify the database field they'd like to use for sorting through Column.get_sort_field(), which defaults to returning self.db_field. If you search, you'll find where we call get_sort_field(), and how it places the result in the list of fields to sort with. How about we update this to allow returning a list/tuple? Then we can have: class FullNameColumn(...): ... def get_sort_field(self): return ('first_name', 'last_name') The code that create the sort list can check if it got multiple items from this call, and add each independently to the sort list. 
This would require a change in Djblets to provide this capability (plus a matching unit test), and a second change in Review Board to define this method for the column. Summary That's three review requests: 1 for the sorting fix (fix + unit test), 1 for the Djblets multi-field sorting (implementation + unit test), 1 for defining FullNameColumn.get_sort_fieldin Review Board. Sound good? I can go over this with you on Slack if it helps. There's no need for the uprefix. We use Unicode strings by default.
https://reviews.reviewboard.org/r/10155/
CC-MAIN-2019-26
refinedweb
780
73.78
On Sun, Apr 22, 2012 at 7:51 PM, Michael Foord <fuzzyman at gmail.com> wrote: > > > On 19 April 2012 21?) > I don't know about 3.x distutils or packaging specifically, but I do know that 2.x distutils will install packages compatibly with this approach if you list the child packages but NOT the namespace package in your setup.py. So if one distribution lists 'foo.bar' and the other lists 'foo.baz', but *neither* lists 'foo', then the subpackages will be installed without a foo/__init__.py, and that will make it work. If packaging and 3.x distutils inherit this behavior from the 2.x distutils, then that would be the simplest way to do it. (And if you install to different directories, the parts will get merged.) -------------- next part -------------- An HTML attachment was scrubbed... URL: <>
https://mail.python.org/pipermail/import-sig/2012-April/000475.html
CC-MAIN-2016-40
refinedweb
140
79.16
I do have a small project, there is two arrays. Array no#1 holding ascending sorted values. This table is the input dataArray no#2 is the output data. It contains other values that are not linear but corresponds to Array no#1 index position.Unfortunately Array no#2 is not possible to calculate with the microprocessor, so I have to use arrays for the moment. The code example below is only showing test data and not real values. Worth to mention the arrays only hold around 60-80 values each. #include <math.h>#include <hashmap.h>... // some initialization going on hereint key, val;// get the integer index, which is the closest. 0 will be mapped to 10key = round(timer1/10) * 10 + 10 ;if(hashMap.hasKey(key)) val = hashMap.getValue(key);else // cover the cases key > 600 and key < 0 if(key > 600) val = hashMap.getValue(600); else val = hashMap.getValue(10); This MUST be done with HashTables to achieve the fastest way possible. Since you listed steps of 10 in array1, we assume them to be like that. int index = round(timer1/10) -10 ; a[0] = elementInOtherArrayFor10;a[1] = elementInOtherArrayFor11;a[2] = elementInOtherArrayFor12;a[3] = elementInOtherArrayFor13;... In the given example you can directly calculate the index of an array by:Code: [Select]int index = round(timer1/10) -10 ; int index = ((timer + 9) / 10) - 10; unsigned long start = micros (); int pos = -1; for (search = 10; search < 600; search +=10) { int * p; p = &array[0]; pos = -1; while (((*p) < search) ) p++; // assumes search is <= last element. 
if ((*p) == search) pos = (p - &array[0]); } unsigned long finish = micros (); Serial.println(pos); 58Time taken = Search Value :600Time taken = 1176 uS ==> ~20uSec per search, for (x=0, x<upperLimit; x=x+2){if (array[x] == searchTerm){output = array[x+1];}} Why not make a simpler table?array[] = {a,a1,b,b1,c,c1,d,d1,e,e1,f,f1...,};then search the array in 2s:Code: [Select]for (x=0, x<upperLimit; x=x+2){if (array[x] == searchTerm){output = array[x+1];}}Only have to go thru list once and you have the result. What's the actual range of the input data? I don't think you're going to get "a couple microseconds" doing anything more complex than a single stage lookup table, but that easily doable if your range is 0..1000 as per the example... What if you did the search where you compare to the midpoint, then 1/4 up or down, then 1/8 up or down, then 1/16 up or down, then 1/32 up or down, then 1/64 if there's still room to go.I don't recall what that search is called. Please enter a valid email to subscribe We need to confirm your email address. To complete the subscription, please click the link in the Thank you for subscribing! Arduino via Egeo 16 Torino, 10131 Italy
http://forum.arduino.cc/index.php?topic=205281.msg1510975
CC-MAIN-2016-50
refinedweb
486
64.81
On Wed, 11 Apr 2001 14:44:55 +1000, Peter Donald wrote: >At 08:48 9/4/01 -0700, David Rees wrote: >. > >I am not sure why you think such a system would be simpler. Aspect based >systems are meant to be used to give fine grain separation of concerns. How >the aspects are handled (ie Facilities in my terminology) is not set. In >essence what you propose is to reclump all aspects into one again and then >swap out facilities at runtime (ie essentially what Ant1 does with it's >magic properties/loggers). > >This of course fails to provide for large projects who want need the extra >flexibility to do their own thing. It also doesn't add anything on an >aspect based system because we could always directly configure facilities >to provide appropriate fgeatures (ie special ClassLoaderFacility for GUMP >builds, BlameFacility for Alexandria, DocFacility for AntDoc etc). > >So I can't see how it is simpler or more useful for the **users** (though >it would be simpler for us Ant developers). > I think are concepts are the same and its more of an API question rather than one of aspect orientation. As a big proponent and someone who has coded aspects into the compiler in those languages that I could (Smalltalk) I don't think I am arguing against aspects. In fact, in my experience, most aspect oriented solutions are not visible in the code at the point where they are used. Instead, they are installed/uninstalled as part of that classes configuration. What I am suggested is that context represents this API for installing/uninstalling aspects. As that they are explicitly supported in the API. I see the logfile attribute being on a LogContext element like: <Context id="detailed"> <LogContext name="current" logfile="log.txt" /> </Context> and I think you see it as a namespace delimited attribute (right?) on the task itself: <Copy log:. dave
https://mail-archives.eu.apache.org/mod_mbox/ant-dev/200104.mbox/%3Coir7dt8b9qqkd9ofpkqca0a3utrh78dsgv@4ax.com%3E
CC-MAIN-2021-21
refinedweb
318
61.87
. Depending on the compiler you use, you may be able to look at the object data and see. I don't know a generic way to do this. if ( &CMyClass::VirtMethod == &CBaseClass::VirtMethod) { // not overridden... } We value your feedback. Take our survey and automatically be enter to win anyone of the following: Yeti Cooler, Amazon eGift Card, and Movie eGift Card! That might work if I needed to know if a particlar class, like CMyClass, had overidden a function. (I don't know if it works for that even.) But my case is slightly different. I need to know if an object is of a class that does so. So I have a pointer to an object (might be of type CMyClass, might be something else) and any attempt to get a pointer to this virtual function yields a syntax error. Could be I haven't found the right syntax, could be it doesn't exist. if I understand nietod right he has something like class A {... virtual SomeType SomeFunction(SomeParameter .} class B:public class A {... virtual SomeType SomeFunction(SomeParameter .} class C:public class A {... .} GlobalFunction(A* Ptr) { // here he wants to know if function is overwritten // I did not try but I know no reason why it should not work if(&Ptr->SomeFunction==&A: { // not overwridden... } } and that has nothing to do with virtual table exploration The problem is that "&Ptr->SomeFunction" doesn't compile. I get "illegal operaton on a bound member function." Various parenthesis didn't help either. That error message suggests that this may be more of a language constaint than a syntax issue. But I'm not positive yet. sometimes by mistake i write if(ptr->SomeFunc) insted of if(prt->someFunc()) the compiler compiles that with no error perhaps if(Ptr->SomeFunc==A::SomeF works an other guess which part of the condition lets the compiler fail ? 
&Ptr->SomeFunc or &A::SomeFunc maybe its on A::SomeFunc The compiler may be can't Interpret that A::SomeFunc has no instanciation and its virtual perhaps the Following then works: GlobalFunction(A* Ptr) { A Dummy; if(&Ptr->SomeFunction==& { // not overwridden... } } PS: Sorry for answerring but I am on vacation for the next three weeks and if that above works and you ask me to answer you have a open question for above three weeks - I am not sure how long the period is until the question is auto deleted BTW Pointers to members can only be used in conjunction with instances of the class (for an example, see '') Ask Yonat... I suspect that norbert's isn't any better, but I will try to be sure. Alex, you sound very confident do you know something I don't? (well, probably lots but...) To the 'object' problem: Pointers to members can only be used with a 'clumsy' syntax, e.g. class CMyClass { public: void VoidMemberTakingNothing(){ }; typedef void ( CMyClass::*PMYCLASSMETHOD) CMyClass c; PMYCLASSMETHOD p = &CMyClass::VoidMemberTakin (c.*p)(); (But better see the URL, it's elaborated a bit more there... ;-) Huh? I made two comments: that the currently suggested solution doesn't work and that you should ask Yonat because she knows more about C++ than any other EE expert. True, I'm very confident about both statements but you do know them to be true. I have worked out a solution this way. I have compiled and executed it with a VC++ compiler and it works. Here is the code #include <stdio.h> class oo{ public: virtual int a(){return 0;} }; typedef int (oo::*f1)(); f1 baseptr=&oo::a; class pp:public oo{ public: int a(){return 0;} }; typedef int (pp::*f2)(); f2 childptr=&pp::a; main(){ oo *ptr=new oo; if(baseptr==ptr->a) puts("DADDY"); ptr=new pp; if(childptr==ptr->a) puts("KID"); return 0; } Hope it helps. Thanks pagladasu Try the following: ptr = new pp; if (baseptr == ptr->a) puts("DADDY"); if (childptr == ptr->a) puts("KID"); Both cases evaluate as true. 
Just again - i thought of using something like this: #include <stdio.h> class CSimple { public: CSimple () {}; ~CSimple () {}; virtual void VirtMeth () { printf ( "base virtual method\n");}; }; class CDerivedOnly: public CSimple { public: CDerivedOnly () {}; ~CDerivedOnly () {}; }; class CDerivedAndOverridden: public CSimple { public: CDerivedAndOverridden () {}; ~CDerivedAndOverridden () {}; virtual void VirtMeth () { printf ( "derived virtual method\n");}; }; void main ( void) { CSimple cs; CDerivedOnly cdo; CDerivedAndOverridden cda; cs.VirtMeth (); cdo.VirtMeth (); cda.VirtMeth (); if ( &CSimple::VirtMeth == &CDerivedOnly::VirtMeth) printf ( "virtual method NOT overridden\n"); else printf ( "virtual method overridden\n"); if ( &CSimple::VirtMeth == &CDerivedAndOverridden::Vi printf ( "virtual method NOT overridden\n"); else printf ( "virtual method overridden\n"); } As i said, doesn't work, i just wanted to contribute it as an addition ... 1. Explain why the usual solutions do not work (some C++ mechanics). 2. Throw more objects and classes at the problem. 3. Question the question (my usual "do you really want to do that?" routine). 1. Why the usual solutions do not work. A pointer to a virtual member function is usually implemented as an offset in the vtable, so it's not really a pointer at all. The only thing you can do with this offset, is use it with a specific object to invoke the member function, but not to get the function's address. Non-portably, you can get the address of the vtable for a specific object, add the offset, and check what's there. But I know of no portable way to do this. 2. Throw more objects and classes at the problem You can replace the member function with a function object ("functor" or "command object"). 3. Question the question Why do you need to know if the function is overridden? Are you violating the Liskov substitution principle? The "legal" answer for this is "I really need reflection, because I'm writing a library for XXX". 
So what's XXX? Is it possible to do without reflection? (tradeoffs, tradeoffs…) The reason I am asking this is I have a case where derived classes can handle a group of "notifications" from base class by implenting a virtual function. If they chose to handle the notification (overide the virutal function) then there is some "common tasks" that need to be performed at the start and end of the processing, but if they don't choose to override the function, those task should not be performed. I can let the overide function call procedures to perform these common tasks, but this is for a library and If a programmer forgot...it would be bad. In addition, with this sort of approach, a derived class could not call a base class's version of the function to do some of the processing as it would call the "common code" again. I want to try to make this as idiot proof as possible as there will be many idiots using the library--and I'll be one of them. I did not think of using a functor for this because this actually is for a functor (of sorts--I guess it is a "fancy functor"). It has posibilities, but I may just "give up" and require the programmer to be a little more careful. if (itsFunctor) { InitStuff(); itsFunctor(); CleanupStuff() } The user-programmer writes the a functor and passes it to the constructor or some other way. But here is a semi-portable code (it will probably work as-is on many compilers, but it may just as well may break on others - I would advise against using it). template <class BaseClass> void* GetVirtualFuncPtr( // non portable! 
const BaseClass& object, unsigned memberFuncIdx) { // Find the pointer to the vtable // (assume it's at the beginning of the object and that // its length is the same as size_t) size_t vptr = *( reinterpret_cast<const size_t*>(&object) ); // Look at the vtable as a simple array of function pointers typedef void (*FuncPtr)(); FuncPtr* vtable = reinterpret_cast<FuncPtr*> return vtable[memberFuncIdx]; } template <class BaseClass, class DerivedClass> bool OverridesVirtualFunc( const BaseClass& baseObject, const DerivedClass& object, unsigned memberFuncIdx) { static void* basefptr = GetVirtualFuncPtr(baseObje // static is only an optimization const BaseClass& derivedObject = object; // ensures DerivedClass is derived from BaseClass, // and also handles multiple inheritence void* fptr = GetVirtualFuncPtr(derivedO return (fptr != basefptr); } class Base { public: virtual void Foo() { int x = 7; ++x; } }; class NotOverrides : public Base { }; class Overrides : public Base { public: void Foo() { int y = 42; --y; } }; #include <cassert> int main() { const unsigned cBaseFooIndex = 0; // fragile whenever Base changes Base b; NotOverrides no; Overrides o; assert( false == OverridesVirtualFunc(b, b, cBaseFooIndex) ); assert( false == OverridesVirtualFunc(b, no, cBaseFooIndex) ); assert( true == OverridesVirtualFunc(b, o, cBaseFooIndex) ); return 0; } Are there any compilers which break this assumption? - The vtable may contain the RTTI stuff, and not only the pointers to the virtual functions. If the length of the RTTI stuff is constant, then you can cover up for it by adjusting the memberFuncIdx, though. - The vtable may be near/far while regular pointers to functions are far/near. (this is not a dead issue - it is still important in many embedded systems. It may also be important in 64-bit architectures.) - The vtable is in a different component (like a DLL or something) and so contains a special kind of reference which is different from a normal pointer. 
(OK, I'm letting my imagination run wild...) In any case, the standard does not guarntee anything about the vtable, so you can't rely on it being one thing or another. Since, Norbert's answer(s) was not right, I'm rejecting it. Althought I'm not using Yonat's answers, I think they were the best, so I'd like you to answer. They reason I'm not using that approach was that I am trying to consolidate things to reduce the number of classes and types in my library (where reasonable) in an effort to give VC a hand in compiling. (My goal is to get it to compile the library in less time than it took to write it!) So I had collected a bunch of function pointers that perform related tasks (fucntion pointers left over from my Pre C++ days) into a single "Functor" class that contained several virutal functions. I don't want to add real functors to this class as, I'm leaving on vacation for a week and I'm hopping it can compile the 80,000 lines or so, while I'm gone. class Base { public: void DoSomething(); // Do not override! private: virtual void ActualOperation() {} // Users can override this one. }; void Base::DoSomething() { // Do startup processing... ActualOperation(); // Do cleanup processing... } Users will provide customized private ActualOperation() methods. Of course, that's one more class in an effort to minimize the number of classes...
https://www.experts-exchange.com/questions/10109775/Determined-if-a-virtual-function-is-overloaded.html
CC-MAIN-2018-05
refinedweb
1,763
61.06
Opened 3 years ago Closed 3 years ago Last modified 3 years ago #10224 closed bug (fixed) Partial type signatures generate typed hole warnings Description GHCi generates typed hole warnings in type signatures even when PartialTypeSignatures is enabled: > :set -XPartialTypeSignatures > :t (==) :: Char -> _ <interactive>:1:17: Warning: Found hole ‘_’ with type: Char -> Bool In an expression type signature: Char -> _ In the expression: (==) :: Char -> _ (==) :: Char -> _ :: Char -> Char -> Bool > :set -XNoPartialTypeSignatures > :t (==) :: Char -> _ <interactive>:1:17: Found hole ‘_’ with type: Char -> Bool To use the inferred type, enable PartialTypeSignatures In an expression type signature: Char -> _ In the expression: (==) :: Char -> _ Similarly, GHC 7.10.1, when given the following source, {-# LANGUAGE PartialTypeSignatures #-} f = (==) :: Char -> _ main = return () mentions that it "found a hole". Change History (7) comment:1 Changed 3 years ago by comment:2 Changed 3 years ago by I think mpickering is right. Seem ok? I'll close as invalid, but reopen if you think this is wrong. Simon comment:3 Changed 3 years ago by I find the warning message quite misleading. Here is my (uninteresting) case: foo :: _ -> Bool foo True = False foo False = True I get this warning (with -XPartialTypeSignatures) Scratch.hs:18:8: warning: Found hole: _ :: Bool In the type signature for ‘foo’: _ -> Bool I think of "hole" as something in an expression instead of a type. In a type, I thought it was called a "wildcard". In any case, _ :: Bool is wrong. My _ certainly does not have type Bool. It has value Bool. Here is my suggested output: Scratch.hs:18:8: warning: Found type wildcard: `_' standing in for `Bool' In the type signature for ‘foo’: _ -> Bool I'm reopening on the basis that, even if my hole vs. wildcard distinction is silly nitpicking, saying _ :: Bool is wrong and should be fixed. 
comment:4 Changed 3 years ago by comment:5 Changed 3 years ago by Good idea I think this is the expected behaviour. From the user guide:
https://ghc.haskell.org/trac/ghc/ticket/10224
CC-MAIN-2018-09
refinedweb
338
68.2
Structure of C++ Program The C++ program is written using a specific template structure. The structure of the program written in C++ language is as follows: Documentation Section: - This section comes first and is used to document the logic of the program that the programmer going to code. - It can be also used to write for purpose of the program. - Whatever written in the documentation section is the comment and is not compiled by the compiler. - Documentation Section is optional since the program can execute without them. Below is the snippet of the same: C++ Linking Section: The linking section contains two parts: Header Files: - Generally, a program includes various programming elements like built-in functions, classes, keywords, constants, operators, etc. that are already defined in the standard C++ library. - In order to use such pre-defined elements in a program, an appropriate header must be included in the program. - Standard headers are specified in a program through the preprocessor directive #include. In Figure, the iostream header is used. When the compiler processes the instruction #include<iostream>, it includes the contents of the stream. #include<iostream> Namespaces: - A namespace permits grouping of various entities like classes, objects, functions, and various C++ tokens, etc. under a single name. - Any user can create separate namespaces of its own and can use them in any other program. - In the below snippets, namespace std contains declarations for cout, cin, endl, etc. statements. using namespace std; - Namespaces can be accessed in multiple ways: - using namespace std; - using std :: cout; Definition Section: - It is used to declare some constants and assign them some value. - In this section, anyone can define your own datatype using primitive data types. - In #define is a compiler directive which tells the compiler whenever the message is found replace it with “Factorial\n” . 
- typedef int K; this statement telling the compiler that whenever you will encounter K replace it by int and as you have declared k as datatype you cannot use it as an identifier. Global Declaration Section: - Here the variables and the class definitions which are going to be used in the program are declared to make them global. - The scope of the variable declared in this section lasts until the entire program terminates. - These variables are accessible within the user-defined functions also. Function Declaration Section: - It contains all the functions which our main functions need. - Usually, this section contains the User-defined functions. - This part of the program can be written after the main function but for this, write the function prototype in this section for the function which for you are going to write code after the main function. C++ Main Function: - The main function tells the compiler where to start the execution of the program. The execution of the program starts with the main function. - All the statements that are to be executed are written in the main function. - The compiler executes all the instructions which are written in the curly braces {} which encloses the body of the main function. - Once all instructions from the main function are executed control comes out of the main function and the program terminates and no further execution occur. Below is the program to illustrate this: C++ Output FACTORIAL 5! = 120
https://www.geeksforgeeks.org/structure-of-c-program/?ref=leftbar-rightbar
CC-MAIN-2021-21
refinedweb
541
54.02
Epetra_CrsGraph: A class for constructing and using sparse compressed row graphs. More... #include <Epetra_CrsGraph.h> Epetra_CrsGraph: A class for constructing and using sparse compressed row graphs.: In all but the most advanced uses, users will typically not specify the column map. In other words, graph entries will be submitted using GIDs not LIDs and all entries that are submitted are intended to be inserted into the graph. If a user is not particularly worried about performance, or really needs the flexibility associated with the first situation, then there is no need to explicitly manage the NumIndicesPerRow values or set StaticProfile to true. In this case, it is best to set NumIndicesPerRow to zero. Users who are concerned about performance should carefully manage NumIndicesPerRow and set StaticProfile to true. This will give the best performance and use the least amount of memory. Epetra_Map attributes Epetra_CrsGraph objects have four Epetra_Map attributes... Copy constructor. This will create a Level 1 deep copy. This Graph will share ownership of the CrsGraphData object with the right hand side Graph. Returns the Column Map associated with this graph. Returns a pointer to the CrsGraphData instance this CrsGraph uses. (Intended for developer use only for testing purposes.) Extract a list of elements in a specified global row of the graph. Put into storage allocated by calling routine. Get a view of the elements in a specified global row of the graph. This function requires that the graph not be completed (FillComplete() was not called). Extract a list of elements in a specified local row of the graph. Put into storage allocated by calling routine. Get a view of the elements in a specified local row of the graph. This function requires that the graph be completed FillComplete() was called). Transform to local index space using specified Domain/Range maps. Perform other operations to allow optimal matrix operations. 
Performs this sequence of operations: Transform to local index space. Perform other operations to allow optimal matrix operations. This overloading of the FillComplete method assumes that the domain-map and range-map both equal the row-map, and simply calls FillComplete(RowMap(), RowMap()). Returns the global column index for a given local column index, returns IndexBase-1 if we don't have this local column. Returns the maximum number of nonzero points across all rows across all processors. This function returns the max over all processors of MaxNumNonzeros(). Returns true if we have a well-defined ColMap, and returns false otherwise. Enter a list of elements in a specified global row of the graph. Enter a list of elements in a specified local row of the graph. Returns the local column index for a given global column index, returns -1 if no local column for this global column. Returns the maximum number of nonzero points across all rows on this processor. For each entry in the graph, let i = the GRID of the entry and j = the CGID of the entry. Then the entry size is the product of the rowmap elementsize of i and the colmap elementsize of j. Let ki = sum of all entry sizes for the entries in the ith row. For example, if the ith block row had 5 block entries and the element size of each entry was 4-by-4, ki would be 80. Then this function returns the max over all ki for all rows on this processor. Returns true if the GCID passed in belongs to the calling processor in this map, otherwise returns false. Returns true if the LRID passed in belongs to the calling processor in this map, otherwise returns false. Returns the number of indices in the global graph. Note that if the graph's maps are defined such that some nonzeros appear on more than one processor, then those nonzeros will be counted more than once. If the user wishes to assemble a graph from overlapping data, they can use Epetra_FECrsGraph.
Returns the number of entries in the set of column-indices that appear on this processor. The set of column-indices that appear on this processor is the union of column-indices that appear in all local rows. The size of this set isn't available until FillComplete() has been called. Assignment operator. This will do a Level 1 deep copy. It will share ownership of the CrsGraphData with the right hand side Graph. Inlined bracket operator for fast access to data. (Const and Non-const versions). No error checking and dangerous for optimization purposes. Make consecutive row index sections contiguous, minimize internal storage used for constructing graph. After construction and during initialization (when indices are being added via InsertGlobalIndices() etc.), the column- indices for each row are held in a separate piece of allocated memory. This method moves the column-indices for all rows into one large contiguous array and eliminates internal storage that is not needed after graph construction. Calling this method can have a significant impact on memory costs and machine performance. If this object was constructed in View mode then this method can't make non-contiguous indices contiguous and will return a warning code of 1 if the viewed data isn't already contiguous. Returns the reference count of CrsGraphData. (Intended for testing purposes.) Remove all indices from a specified global row of the graph. Remove a list of elements from a specified global row of the graph. Remove all indices from a specified local row of the graph. Remove a list of elements from a specified local row of the graph. Removes any redundant column indices in the rows of the graph. Forces FillComplete() to locally order ghostnodes associated with each remote processor in ascending order. 
To be compliant with AztecOO, FillComplete() already locally orders ghostnodes such that information received from processor k has a lower local numbering than information received from processor j if k is less than j. SortGhostsAssociatedWithEachProcessor(True) further forces FillComplete() to locally number all ghostnodes received from processor k in ascending order. That is, the local numbering of b is less than c if the global numbering of b is less than c and if both b and c are owned by the same processor. This is done to be compliant with some limited block features within ML. In particular, some ML features require that a block structure of the matrix be maintained even within the ghost variables. Sort column indices, row-by-row, in ascending order.
http://trilinos.sandia.gov/packages/docs/r10.4/packages/epetra/doc/html/classEpetra__CrsGraph.html
CC-MAIN-2014-10
refinedweb
1,069
56.96
Jupyter Notebooks¶ Acknowledgements¶ The material in this tutorial is specific to PYNQ. Wherever possible, however, it re-uses generic documentation describing Jupyter notebooks. In particular, we have re-used content from the following example notebooks: - What is the Jupyter Notebook? - Notebook Basics - Running Code - Markdown Cells The original notebooks and further example notebooks are available at Jupyter documentation. Introduction¶ If you are reading this documentation from the webpage, you should note that the webpage is a static html version of the notebook from which it was generated. If the PYNQ platform is available, you can open this notebook from the getting_started folder in the PYNQ Jupyter landing page. electronically, using The Notebook supports a range of different programming languages. For each notebook that a user opens, the web application starts a kernel that runs the code for that notebook. Each kernel is capable of running code in a single programming language. There are kernels available in the following languages: - Python - Julia - R - Ruby - Haskell - Scala - node.js - Go. Most users don’t need to know about these details, but its important to understand that kernels run on Zynq, while the web browser serves up an interface to that kernel. Notebook Documents¶ Notebook documents contain the inputs and outputs of an interactive session as well as narrative text that accompanies the code but is not meant for execution. Rich output generated by running code, including HTML, images, video, and plots, is embedded: Deprecated. Headings are supported in Markdown cells -. Some of documentation for Pynq, including this page, was written in a Notebook and converted to html for hosting on the project’s documentation website.. GitHub also renders notebooks, so any Notebook added to GitHub can be viewed as intended. Notebook Basics¶ The Notebook dashboard¶ The Notebook server runs on the ARM® processor of the PYNQ-Z1. 
You can open the notebook dashboard by navigating to pynq:9090 when your board is connected to the network. The dashboard serves as a home page for notebooks. Its main purpose is to display the notebooks and files in the current directory. For example, here is a screenshot of the dashboard page for an example directory: The top of the notebook list displays clickable breadcrumbs of the current directory. By clicking on these breadcrumbs or on sub-directories in the notebook list, you can navigate your filesystem. To create a new notebook, click on the “New” button at the top of the list and select a kernel from the dropdown (as seen below). The Jupyter Notebook has a modal user interface which means that the keyboard does different things depending on which mode the Notebook is in. There are two modes: edit mode and command mode. Edit mode¶ Edit mode is indicated by a green cell border and a prompt showing in the editor area. Running Code¶ First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. Pynq, and this notebook is associated with the IPython kernel, which runs Python code. Code cells allow you to enter and run code¶ Run a code cell using Shift-Enter or pressing the play button in the toolbar above. The button displays run cell, select below when you hover over it. In [1]: a = 10 In [ ]: print(a) There are two other keyboard shortcuts for running code: Alt-Enter runs the current cell and inserts a new one below. Ctrl-Enter runs the current cell and enters command mode. Managing the Kernel¶ Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the stop button in the toolbar above. The button displays interrupt kernel when you hover over it.
In [ ]: import time time.sleep(10) Restarting the kernels¶ The kernel maintains the state of a notebook’s computations. You can reset this state by restarting the kernel. This is done from the menu bar, or by clicking on the corresponding button in the toolbar. sys.stdout¶ The stdout and stderr streams are displayed as text in the output area. In [ ]: print("Hello from Pynq!") Output is asynchronous¶ All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end. In [ ]: import time, sys for i in range(8): print(i) time.sleep(0.5) Markdown inline or displayed on their own line. Inline expressions can be added by surrounding the latex code with $: Inline example: $e^{i\pi} + 1 = 0$ This renders as: Inline example: \(e^{i\pi} + 1 = 0\) Expressions displayed on their own line are surrounded by $$: $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$ This renders as: If you have local files in your Notebook directory, you can refer to these files in Markdown cells directly: [subdirectory/]<filename> Security of local files¶ Note. For more information, see Jupyter’s documentation on running a notebook server.
https://pynq.readthedocs.io/en/v2.0/jupyter_notebooks.html
CC-MAIN-2019-26
refinedweb
856
63.7
hi, regarding the silver challenge for Arrays - use a For loop to reverse the order of the elements in an array. How would i approach this ?? take them off the end and stick them on the front??? i’m a bit confused about where to start here. thanks for any hints ! I was kind of confused on that one too, here is the loop: for item in toDolist.reverse(){ print(item) } and simplified: toDolist = toDolist.reverse() simple as that!! thank you!! This was my long form solution using a for loop. var toDoList = ["Take out the garbage", "Pay bills", "Cross off finished items"] var reverseArray = [String]() for item in toDoList { reverseArray.insert(item, atIndex: 0) } print(reverseArray) // ["Cross off finished items", "Pay bills", "Take out the garbage"] Simpler solution was identical to yours. toDoList = toDoList.reverse() print(toDoList) [quote=“badbluberries”]This was my long form solution using a for loop. [/quote] Wow this is a much easier solution than mine. Should have thought of that! import Cocoa var toDoList = ["Take out garbage", "Pay bills", "Cross off finished items"] // Test the reversal by adding more elements here var easierWay: Array<String> = toDoList.reverse() // The automated reversal var elementCount = toDoList.count // Get the amount of elements in the array elementCount -= 1 // Decrease one because arrays start at 0 var currentPosition = 0 var reverseElement = "" // The var where we will store the overwritten element temporarily var loopAmount = elementCount/2 // Amount of loops necessary to reverse an array print("Before: \(toDoList)") for var i = loopAmount; i > 0; i-- { reverseElement = toDoList[currentPosition] // Store the current element toDoList[currentPosition] = toDoList[elementCount] // Overwrite the current element toDoList[elementCount] = reverseElement // Restore the overwritten element at the mirrored index elementCount-- currentPosition++ } print("After: \(toDoList)") // This is reversed by our loop print("Control array: \(easierWay)") 
// This is the automated reversal Just where in the documentation is reverse documented? Google search finds it on other forums but I draw a blank with the Apple documentation. reverse() is a method on CollectionType. Docs are here: developer.apple.com/library/tvo … index.html Got it, thanks… Just thought I would share on the first part of the challenge. I was playing around and happened to stumble my way into this solution using only the functions/methods shown in this and previous chapters. Basically copying an item to the end of the array and removing the item that was just copied. [code]for var i = toDoList.count-1 ; i >= 0 ; i-- { toDoList.append(toDoList[i]) toDoList.removeAtIndex(i) } print(toDoList)[/code] Here is the simplest solution I could find using for loop: for i in 0...(toDoList.count-1) { toDoList.insert(toDoList.popLast()!, atIndex: i) } I managed to get SortInPlace to work (eventually): toDoList.sortInPlace { $0.capitalizedString > $1.capitalizedString } But reverse() is far easier! Actually, you can end the for loop one iteration earlier. In the above, the final iteration pops off the last string in the array and then puts that same string back into the last position. Hence, the last iteration does not actually result in a change to the array. Thus, you can use: for i in (0…toDoList.count-2) { toDoList.insert(toDoList.popLast()!, atIndex: i) } for i in (0…toDoList.count-2) { toDoList.insert(toDoList.popLast()!, atIndex: i) }
https://forums.bignerdranch.com/t/book-silver-challenge-arrays/8122
CC-MAIN-2018-05
refinedweb
538
56.76
04-05-2010 02:02 PM I inhereted an app that used a previous version of the SDK (2008?) and am trying to get it to work with the 2010 SDK. I'm not even sure where to start on some of the obsoleted. For a simple example, I have a call to Opportunity(instance).RemoveProduct. The help consists only of public void RemoveProducts( Guid key, OpportunityProduct[] products ) I have no idea what GUID it is looking for or how to get a OpportunityProduct array from the Opportunity. And then there are Methods that are not there at all anymore and have no apparent replacement, such as SetOpportunityField. Is there a document anywhere that may get me pointed in the right direction or is this going to have to be a complete re-write? I work with quite a few SDKs and this is, by far, the worst combination of poor help documents and lack of backward compatibility I've ever seen. 04-06-2010 08:09 AM - edited 04-06-2010 08:10 AM Not knowing what your accomplishing it difficult to say whether or not a complete rewrite will be necessary. Opportunities did changed significantly between the 2009 and 2010 versions so hopefully that's the only place your running into obsolete methods. The Guid parameter is the ID for the opportunity but I don't think you'll need it, here is an example I've hastily thrown together that will delete products from opportunities. OpportunityList ol = afw.Opportunities.GetOpportunities(null); Opportunity o = ol[3]; CustomEntityList<OpportunityProduct> productList = o.GetProducts(null); OpportunityProduct op = productList[0]; OpportunityProduct[] opRay = new OpportunityProduct[1];opRay[0] = op; try { o.UpdateProducts(null, opRay); } catch (Exception x) { MessageBox.Show(x.ToString()); } 04-07-2010 11:14 AM - edited 04-07-2010 11:21 AM The problem I had with all the Get... functions is that the CustomEntityList class is not found, so it won't event compile. I am using VS 2005 (not my choice) and a version of the SDK that may or may not be the most current. 
That class is not listed in the newest ACT! SDK help or C# MSDN library. It also does not like any of my attempted commands of the form: (Assume opp is an instatiated Opportunity object and prodTot is a decimal value) opp.Fields["ProductTotal"] = prodTot; I am getting the name from the enumeration. I have tried all the Field collections and get the same error: Invalid name format: ProductTotal Parameter name: realName Act.Framework at Act.Framework.MutableEntities.MutableEntityMetaData.GetNameParts(String name, String& tableName, String& columnName) at ... 04-07-2010 11:42 AM CustomEntityList should be part of the Act.Framework.CustomEntities namespace, but your right, my search on the index of the SDK reference documentation comes up empty, I'll have to look into that. I ran into similar issues with Contact.Fields["Contact"] -- threw an identical error. I had less issues working with field aliases, not sure if it's the same for Opportunities but Contact.Fields["Contact", Act.Framework.MutableEntities.FieldNameType.Alias]; got it done for me. 04-07-2010 01:19 PM Hi Bob, In your Project References, what version of the Act.Framework are you using? I'm being a bit thick here so please bear with me, I can't work out if you mean you are using the ACT! 12 SDK documentation against the Act.Framework.dll for ACT! 2008 or the other way around or are using the ACT! 12 Act.Framework.dll and are trying to work against an ACT! 2008 installation! With regards to trying to get the Product Total for the Opportunity, you could get the property ProductTotal: decimal d = opp.ProductTotal; 04-07-2010 03:20 PM Both the SDK and Framework I am talking about are version 12. The incomplete, no-comments, code that I am trying to get to work was written against 2008. I am making some progress, but continuing to hit road blocks. I'm also getting some weird stuff happening where members don't show up in the VS intellisense and then they do. 
Shortly after my last post the Product Total showed up. I know I looked for it many times and it was not there. I'm assuming thats some quirk of VS2005 and not spending any time figuring out the cause of that. I was able to set the ProductTotal value.However, when I look at the ACT! interface, the Totals field in the upper left corner of the Detail view remains 0. If I do an opp.Update() (where opp is the instantiated Opportunity), then it doesn't save the Opportunity at all, or saves it to never-never land. I don;t see it in the UI anyway. My guess is that is a calculated field based on the sum of the added OpportunityProducts, although no error occured anywhere in the process. Ignoring that for now, I was moving on to populating the OpportunityProducts. It appears that I need to fill an array of OpportunityProduct instantiations and send it into the UpdateProducts method. I have everything coded for that to happen except making a blank OpportunityProduct. The constructor is looking for a parameter of type Act.Framework.CustomEntities.CustomSubEntity.CustomSubEntityInitializationState. I can find no valid values for that class, so that long road appears to have lead to a dead-end. I'm trying to work things out the best I can before resorting to asking here. The problem with the Update for example, a person would almost need to see all the surrounding code to really be able to see any problems there. When I can isolate things down to specific commands, that's when I'd expect the people on this forum might be able to use their experience to possibly help me. 04-08-2010 01:06 AM Hi Bob, I feel your pain! Anyway I have had a bit of look through the SDK and I think the design is towards not instantiating a new OpportunityProduct object but to get a Product as a OpportunityProduct (I think I can see where they are coming form with this design in a way, but yes should be documented). 
So let's say we have our Opportunity opp and we know the "name" of the Product we want to associate with opp here we go: ActFramework oFram;Opportunity opp; Product prod; ... prod = oFram.Products.GetProduct("myProductStringName"); CustomEntityList<OpportunityProduct>myOppProds = oFram.Products.GetOpportunityProduct(prod.ID); So once you have the List<> you can do what you want with the contents etc and finally call a opp.UpdateProducts(myOppProd,null); HTH 04-09-2010 09:39 AM 04-09-2010 01:42 PM I can't think of a single application use case scenario where you could have an OpportunityProduct object without an associated Product, unless of course you have some strange level of corruption. If however you are presented possibly with the name of a Product that is not part of the product list you could of course check for null through the returned ProductList via the ProductManager.GetProducts(). It might be an idea to have a quick read through the Application Help files to get an idea of how Opportunities and Products are used within ACT!. 04-09-2010 03:03 PM
https://community.act.com/t5/Act-Developer-s-Forum/Replacements-for-obsoleted-functions/m-p/68274
CC-MAIN-2020-45
refinedweb
1,222
55.54
Ticket #2347 (closed Bugs: fixed) 'Bus error' on OS X when combining ublas and thread Description The following code compiles but gives an error on OS X 10.5.5: #include <boost/numeric/ublas/matrix_sparse.hpp> #include <boost/thread.hpp> int main (int argc, char * argv[]) { boost::numeric::ublas::coordinate_matrix<double> cm; cm.resize(3,3, false); return 0; } Jamroot is: lib boost_thread : : <file>/Users/djp/sg/sage-3.1.1/local/lib/boost/libboost_thread-xgcc40-mt-1_36.a ; exe ut : main.cpp boost_thread : <include>boost ; The program does not crash if: (1) I don't link to boost.thread, or remove boost/thread.hpp from main.cpp. (2) I don't 'strip' the executable before running it. The error is independent of whether I link statically or dynamically, and whether I compile in 'debug' or 'release' mode. (Though it doesn't appear in 'debug' mode unless I strip the executable myself.) The error doesn't depend on coordinate matrix, it happens just the same if I use a ublas::vector (and include the relevant header.) Anyway, the above is about as small an example as I can figure out, and I certainly don't have the skills to go any further. Attachments Change History comment:2 Changed 8 years ago by Dave Philp <dphilp@…> The problem is that 'strip' should not be run without options---doing this strips too much out of the binary. Running strip with the -S and -x options seems to be appropriate on OS X but I don't have the expertise to verify this. (Thanks to Zeljko Vrba for figuring this out.) comment:3 Changed 8 years ago by Dave Philp <dphilp@…> I would appreciate being kept in the loop wrt this bug, please send me an email when you get around to looking at it. D comment:4 Changed 8 years ago by vladimir_prus - Owner set to vladimir_prus - Component changed from None to build Do I understand correctly that if you build in debug mode, application works, but if you strip it with just 'strip', it crashes? What happens if you strip with "strip -u -r"? 
Does the application actually work if stripped with "strip -S -x"? Can you run 'nm' on a binary that crashes and a binary that does not, and post both outputs here? comment:5 Changed 8 years ago by Dave Philp <dphilp@…> Do I understand correctly that if you build in debug mode, application works, but if you strip it with just 'strip', it crashes? Correct. What happens if you strip with "strip -u -r"? Bus error. Does the application actually work if stripped with "strip -S -x"? Well, the application above doesn't actually do much! But it doesn't crash. If I add in a few headers and send the cm matrix to std::cout, everything works as expected: #include <boost/thread.hpp> #include <boost/numeric/ublas/matrix_sparse.hpp> #include <boost/numeric/ublas/io.hpp> #include <iostream> int main (int argc, char * argv[]) { boost::numeric::ublas::coordinate_matrix<double> cm; cm.resize(3, 3, false); cm(0, 0) = 1; cm(1, 1) = 2; cm(2, 2) = 3; std::cout << cm << std::endl; return 0; } In the above, if you compile and run with bjam debug and strip (no options), it crashes before giving any output. (If you comment out #include boost/thread.hpp, it works as expected.) Changed 8 years ago by Dave Philp <dphilp@…> - attachment crasher.txt added Changed 8 years ago by Dave Philp <dphilp@…> - attachment non-crasher.txt added comment:6 Changed 8 years ago by Dave Philp <dphilp@…> I have attached the output of "nm" to this ticket. It's for the new version above with the io stuff. I've never used nm before so don't know if this is what you're after. comment:7 Changed 8 years ago by vladimir_prus - Status changed from new to closed - Resolution set to fixed Thanks, the output you've posted is what I was looking for. 
Diffing the outputs gives this: --- non-crasher.txt 2008-09-30 09:56:43.000000000 +0400 +++ crasher.txt 2008-09-30 09:56:26.000000000 +0400 @@ -1,8 +1,4 @@ -0000f00c D _NXArgc -0000f008 D _NXArgv -0000c812 s __GLOBAL__I_main U __Unwind_Resume -0000c780 s __Z41__static_initialization_and_destruction_0ii .... -0000f000 D ___progname -00002346 t ___tcf_0 -00002124 t __dyld_func_lookup 00001000 A __mh_execute_header -0000f004 D _environ U _exit -00002132 T _main -0000f010 d dyld__mach_header -00002110 t dyld_stub_binding_helper -000020d0 T start which surely looks very scary. I was not able to find a definite explanation why strip breaks a binary like this, so I presume we're using strip in some way it was not meant to, which breaks on newer OSX. But to make progress, I've switched to use -S -x. Feel free to open another ticket if you find a "better" way to strip binaries. I should have mentioned: I have not filed it against ublas, thread, or even build because I have no idea which is causing the problem. I'm happy to help chase it down, and can be contacted at dphilp@…
https://svn.boost.org/trac/boost/ticket/2347
CC-MAIN-2017-09
refinedweb
842
73.68
#include "univ.i" #include "ut0byte.h" #include "ut0lst.h" #include "trx0trx.h" #include "read0types.h" #include "read0read.ic" Go to the source code of this file. Cursor read Created 2/16/1997 Heikki Tuuri High-granularity read view where transaction does not see changes made by active transactions and own changes after a point in time when this read view was created. Read view types Normal consistent read view where transaction does not see changes made by active transactions except creating transaction. This function sets a given consistent cursor view to a transaction read view if given consistent cursor view is not NULL. Otherwise, function restores a global read view to a transaction read view. in: consistent cursor view to be set Close a given consistent cursor view for mysql and restore global read view back to a transaction read view. in: cursor view to be closed Create a consistent cursor view for mysql to be used in cursors. In this consistent read view modifications done by the creating transaction or future transactions are not visible. in: trx where cursor view is created Closes a consistent read view for MySQL. This function is called at an SQL statement end if the trx isolation level is <= TRX_ISO_READ_COMMITTED. in: trx which has a read view Opens a read view where exactly the transactions serialized before this point in time are seen in the view. Prints a read view to stderr. in: read view Makes a copy of the oldest existing read view, or opens a new. The view must be closed with ..._close. Remove a read view from the trx_sys->view_list. in: true if caller owns the trx_sys_t::mutex Remove a read view from the trx_sys->view_list. Checks if a read view sees the specified transaction.
http://mingxinglai.com/innodb-annotation/read0read_8h.html
CC-MAIN-2018-17
refinedweb
294
66.94
It's not the same without you Join the community to find out what other Atlassian users are discussing, debating and creating. my jira version is 4.4.1. i tried to use workflows' Validators (Field Required for Time Spent),but failed. even i have log work,"Workflow Error" still says Time Spent is required when Resolve anyone can help? Use Script Runner plugin and add a validator to check whether the total spent time is more than zero. This link may help with the code Sorry Fabby, it's very tough to understand what you really mean by that statement. If you don't mind can you please rephrase? And why don't you check @mizan 's suggestion? I guess that is a better. hi,Renjith code in this link try to get Time Spent in History Tab,but when delete some work logs, "change history" can't reflect actual Time Spent value sorry of my poor English. i add below code to Script Validator of workflow using Script Runner Plugin import com.atlassian.jira.issue.Issue issue.getTimeSpent() and it meets my requirement. Actually for anyone checking in this is better as it allows you to log work in the edit screen as opposed to fabbys solution which requires already logged work prior to triggering the transition: import com.atlassian.jira.issue.Issue issue.getTimeSpent() || issue.getModifiedFields().get("worklog") Hello, this is working fine, but it's not when the ticket has sub-tasks with logged time. Any idea? I have been using " issue.getModifiedFields(). get ( "worklog" )" in a script validator for a workflow transition to determine if a user has added 'time worked' in the transition. This has been working fine for several months. I have recently upgraded to ScriptRunner v4.1.3.14 and the ScriptRunner editor now gives me the following error on this code "Cannot find matching method com.atlassian.jira.issue.Issue#getModifiedFields()." 
I find "getModifiedFields()" in the MutableIssue class, see, and my understanding was that this came into the script in the binding for scripts in the workflow transitions. Do I need to declare a new mutable issue from the issue passed in the script binding and then use "getModifiedFields()"? I am wondering if anyone has a solution to this that works with the newer version of ScriptRunner? Thank you for any help. I am having the same issue as Will. Anyone have any ideas on what can be done to fix it? @Will and @Dayna, We were faced with this same case and were able to resolve it with the following code: import com.atlassian.jira.issue.Issue import com.atlassian.jira.issue.MutableIssue def mIssue = issue as MutableIssue return mIssue.getTimeSpent() || mIssue.getModifiedFields().get("worklog") It worked for us against JIRA 7.1.9 :D How about making the log work fields compulsory on resolve screen refer.
https://community.atlassian.com/t5/Questions/how-to-force-people-log-work-before-Resolve-Issue/qaq-p/281034
CC-MAIN-2018-34
refinedweb
474
57.06
Brett Cannon wrote: > On 2/8/07, Ron Adam <rrr at ronadam.com> wrote: >> Brett Cannon wrote: >> > On 2/7/07, Ron Adam <rrr at ronadam.com> wrote: >> >> Brett Cannon wrote: >> >> > On 2/4/07, Ron Adam <rrr at ronadam.com> wrote: >> >> It would be nice if __path__ were set on all modules in packages no >> >> matter how >> >> they are started. >> > >> > There is a slight issue with that as the __path__ attribute represents >> > the top of a package and thus that it has an __init__ module. It has >> > some significance in terms of how stuff works at the moment. >> >> Yes, and after some reading I found __path__ isn't exactly what I was >> thinking. >> >> It could be it's only a matter of getting that first initial import >> right. An >> example of this is this recipe by Nick. >> >> > > But Nick already rolled this stuff into 2.5 when package support was > added to runpy. I'll take a look at runpy today sometime. >>. ;-) >> What you would end up doing >> is just moving the [if __name__=="__main__": __main__()] line off the >> end of >> program so that all program have it automatically. We just won't see >> it. And >> instead of checking __name__, the interpreter would check some other >> attribute. >> >> So what and where would that other attribute be? >> > > If a thing was done like that it would be in the global namespace of > the module just like __name__ is. Forget this, I like the idea above much better! It's fully consistent with class's and so it would be easy to explain as well. A step towards unification of class's and modules. The __name__ attribute isn't changed as well. ;-) >> If someone wants to import an external to a package module with the >> same name as >> the package, (or modules in some other package with the same name), >> then there >> needs to be an explicit way to do that. But I really don't think this >> will come >> up that often. 
>> >> >> <clipped general examples> >> >> > Or you could have copied the code I wrote for the filesystem >> > importer's find_module method that already does this classification. >> > =) >> > >> > Part of the problem of working backwards from path to dotted name is >> > that it might not import that way. >> >> Maybe it should work that way? If someone wants other than that >> behavior, then >> maybe there can be other ways to get it? >> > > That's my point; the "other way" needs to work and the default can be > based on the path. We need to get much more specific on this. ie... examples. I don't think we will get anywhere trying to generalize this point. >> Hers's an example of a situation where you might think it would be a >> problem, >> but it isn't: >> >> pkg1: >> __init__.py >> m1.py >> spkg1: >> __init__.py >> m3.py >> dirA: >> m4.py >> pkg2: >> __init__.py >> m5.py >> >> You might think it wouldn't work for pkg2.m5, but that's actually ok. >> pkg2 is a >> package just being stored in dirA which just happens to be located inside >> another package. >> >> Running m5.py directly will run it as a submodule of pkg2, which is >> what you >> want. It's not in a sub-package of pkg1. And m4.py is just a regular >> module. >> >> Or are you thinking of other relationships? > > I am thinking of a package's __path__ being set to a specific > directory based on the platform or something. That totally changes > the search order for the package that does not correspond to its > directory location. In that case, I think the developer and anyone who tries to run the script in a way the developer did not intend will have to be on their own. For example if I add a directory to __path__ to include a module that normally lives someplace else. Thats ok. If I execute any of 'my' modules in 'my' package. It will import __init__.py and set the __path__ accordingly and everything will still work. 
But if I execute the 'other' module directly, then python needs to run it in what ever context it normally lives in. We shouldn't try to figure out what 'other' packages it may be used in, because it may be used in many packages. So the only thing to do is run it in the context it is in where we find it. And not this 'special' context we put it in. For situations where we might have several subdir's in our package that may be choosen from depending on platform (or other things). We may be able to put a hint in the directory, such as a _init__.py file. (Notice the single underscore.) Or some variation if that's too subtle. The idea is it's an inactive sub-package and the main packages __init__ file could activate a 'reserved' sub-package using some method like renaming the _init__.py to __init__.py, (but I really don't like renaming as a way to do that.) It would be better to have some other way. Then we could possibly still do the search up to find the root package by including _init__.py files in our search in those cases as? >> > >> > Yes. I really think ditching this whole __main__ name thing is going >> > to be the only solid solution. Defining a __main__() method for >> > modules that gets executed makes the most sense to me. Just import >> > the module and then execute the function if it exists. That allow >> > runpy to have the name be set properly and does away with import >> > problems without mucking with import semantics. Still have the name >> > problem if you specify a file directly on the command line, though. >> >> I'll have to see more details of how this would work I think. Part of >> me says >> sound good. And another part says, isn't this just moving stuff >> around? And what >> exactly does that solve? > > It is moving things around, but so what? Moving it keeps __name__ > sane. 
At work a global could be set to the name of the module that > started the execution or have an alias in sys.modules for the > '__main__' key to the module being executed. Or just use __call__(). It already behaves in the way you want for class's. It could be reused I think for modules. The only difference is it won't have a self arguments. Which I think is not a problem. > The point of the solution it provides is it doesn't muck with import > semantics. It allows the execution stuff to be external to imports > and be its own thing. > > Guido has rejected this idea before (see PEP 299 : > ), but then again there was > not this issue before. > > Now I see why Nick said he wouldn't touch this in PEP 338. =) I read the thread, and backwards compatibility as well as Guido just not liking it were the reasons it was rejected. Backwards compatibility is less of a problem for py3k, but I also agree with his reasons for not liking it. I think a reserved __call__() function for modules may be a little easier to sell. It's already reserved in other situations for very much the same purpose as well. Cheers, Ron
https://mail.python.org/pipermail/python-ideas/2007-February/000214.html
CC-MAIN-2017-30
refinedweb
1,212
75.1
The guide presented here deals primarially with Python. Some of you may prefer to use a language like Node.js. Happily, your kind classmate Billy Kwok has written a great guide about how do do that here! If you write a guide for a different language I'd love to put it here as well. In last week’s Thursday lab we discussed making our Arduino programs interactive. The approach outlined there was limited though as we could only perform that interaction from inside of the Arduino IDE. In practice and in your projects we want to escape the IDE and thus increase the amount of digital real estate that our tangible interfaces have access to. In today’s Thursday bonus lab we will perform this escape act and start communicating with our Arduinos from Python programs. Having done that we will then explore how we can expand the types of user input that we can use to control our Arduino. As we saw last week, we can send messages to our Arduino via the IDE’s serial monitor. For example, with the code below loaded onto our Arduino we can send messages to the device which it will send back by opening Tools -> Serial Monitor in the context menu and typing into the top bar. void setup() { Serial.begin(9600); } void loop() { if (Serial.available()) { String message = Serial.readStringUntil('\n'); Serial.print(": " + message + "\n"); } } An interesting feature of this ‘serial’ style of interaction is that it is neither unique to Arduinos nor the Arduino IDE. We can communicate with our Arduino via this method using essentially whatever language or method we’d like. Let’s take a look at how we might do so using the Python programming language. Python is a nice choice for this task as its syntax is designed to be relatively human readable even if you don’t know the language and it has an extensive echosystem of libraries to interface with other systems. Start by following the directions here to install Python if you do not have it installed already. 
Having done that you ought to be able to open the Terminal / Command Prompt application on macOS / Windows and run python by typing python. Here is an example python session in my terminal where I ask the language to print hello! to the screen: $ python3 >>> print('hello!') hello! >>> exit() Having installed python we can install a library to interface with the Arduino’s serial port by running the following command in our terminals: $ python3 -m pip install pyserial Having gotten set up, go ahead and upload the “echo” Arduino sketch from the introduction to this section to your Arduino board. Then copy the following python program into a file called pyserial.py on your computer: import serial def get_ports(): """List all of the avaliable ports on the device.""" import serial.tools.list_ports return serial.tools.list_ports.comports() def prompt_for_and_get_port(): """Prompts the user to select a port. Returns the selected one. Input is expected to be a number in [0, num_ports) and is read from standard in. An input outside of that range will result in re-prompting until a valid input is provided. """ ports = get_ports() for index, port in enumerate(ports): print("{}) {}\t{}".format(index, port.device, port.description)) selection = int(input("please input the port number that your port is on: ")) while selection < 0 or selection >= len(ports): print("error: {} is not between 0 and {}".format(selection, len(ports) - 1)) selection = int(input("please input the port number that your port is on: ")) return ports[selection] def prompt_for_and_get_serial(baudrate=9600): """Prompts the user to select a port and returns a serial object connected to that port. By default the returned serial object uses a 9600 baudrate but this can be modified by passing one to the function. 
""" port = prompt_for_and_get_port() return serial.Serial(port=port.device, baudrate=baudrate) serial = prompt_for_and_get_serial() while True: send = input('>> ') serial.write(bytes(send, 'utf-8')) print(serial.readline().decode('utf-8')[:-1]) If you are using a Mac you can do this by copying the above code and running pbpaste > pyserial.py inside of your Terminal window. If you are on Windows and you are comfortable using a text editor you may create the file like that. The program that you’re looking at exposes a function called prompt_for_and_get_serial which when run will ask the user what port their Arduino is connected to and open a serial connection to the Arduino there. It will then take input from the user, send that input to the Arduino, and display the Arduino’s response. Here is an example of an interaction with the program. Note that I run python3 pyserial.py to start the program. $ python3 pyserial.py 0) /dev/cu.blueberry-SPPDev-11 n/a 1) /dev/cu.blueberry-SPPDev-4 n/a 2) /dev/cu.Bluetooth-Incoming-Port n/a 3) /dev/cu.iPhone-WirelessiAPv2 n/a 4) /dev/cu.elzekeo-SPPDev n/a 5) /dev/cu.usbmodem14201 Arduino Uno please input the port number that your port is on: 5 >> hello : hello >> goodbye : goodbye Having done that we have successfully escaped the Arduino IDE and are controlling the Arduino from a python program! You can find the complete source code with some documentation for this program here. Recall from the Wednesday lab item two for your homework: Change the code so that you can control the RGB values with multiple key presses. For example, pressing ‘r’ 5 times will set the brightness to 50% (or brightness = 127) and pressing ‘r’ 10 times will set it to 100% (or brightness = 255) Performing this interaction in the Arduino IDE produces a less than desirable result. In order to send data to the Arduino via the IDE you are required to type your input into the text box and then press enter. 
This effectively transforms the interaction from ‘press r five times’ to ‘press r followed by enter 5 times.’ An advantage of using our Python script is that we no longer have this constraint. We can send input to the user whenever we would like with serial.write(). In order to do this I’ve provided a python script which exposes a getch function. getch reads a character from input on the terminal without requiring that the user presses enter. You can find the source code here along with instructions on using it at the top of the file. Here’s an example of a program that sends a number to the Arduino representing the amount of time that has passed between when the user last pressed a key and the current key press. This program uses the functions from the two provided python programs to do this: def get_time(): return round(time.time() * 1000) serial = prompt_for_and_get_serial() lastPress = get_time() while True: prompt = getch() if prompt == '.': break pressTime = get_time() elapsed = 3 * (pressTime - lastPress) // 4 if elapsed < 255: serial.write(bytes(str(255 - elapsed), 'utf-8')) # Write a space so that the Arduino can tell the sent numbers # apart. serial.write(b' ') lastPress = pressTime Use the example code here to communicate with the Arduino. Can you enable an interaction not otherwise possible with just the Arduino IDE? Some ideas: Serial.parseInt()may be helpful here) and then use python to send some information about your computers internet connection to the Arduino.
https://tui.negativefour.com/non-arduino-ide.html
CC-MAIN-2022-21
refinedweb
1,219
63.9
Parses incoming Sendgrid Webhooks in Pyramid apps Project description Parses incoming Sendgrid Webhooks in Pyramid apps - Free software: MIT license - Documentation:. Features To use this app, add a configuration statement with your intended webhook callback path: config.include('pyramid_sendgrid_webhooks', '/sendgrid/webhooks') Then, set up subscribers for any events that you want to be notified of: from pyramid_sendgrid_webhooks import events def handle_bounce(event): request = event.request print event.reason ... config.add_subscriber(handle_bounce, events.BounceEvent) Currently the app adds a single endpoint at {PREFIX}/receive. This will be the webhook path to give to Sendgrid. In the example above, the full endpoint would therefore be at /sendgrid/webhooks/receive. Credits Tools used in rendering this package: History 1.2.2 (2015-12-15) - Updating documentation with modules 1.2.1 (2015-12-15) - Update trove classifiers 1.2.0 (2015-12-14) - Correct package listing in setup.py 1.0.0 (2015-12-07) - First release on PyPI. Project details Download files Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
https://pypi.org/project/pyramid-sendgrid-webhooks/
CC-MAIN-2021-49
refinedweb
179
52.26
So I am trying to develop a program to convert Date Formats. i.e If the date entered (mm,dd,yy):02,16,91. The output should be like this : This the 16th day of February 1991. And I've done this so far : #include <stdio.h> #include <stdlib.h> int main() { int m,d,y; printf("Please Enter the Date like this format (mm/dd/yy)>\n"); scanf("%d/%d/%d/\n",&m,&d,&y); printf("The Day is %2d\n",d); printf("The Month is %2d\n",m); printf("The Year is %2d\n",y); printf("This is the %2d Day of %2d %2d",d,m,y); return 0; }Can you help me please ? What is the best way to do it and how ? Thanks
http://forum.codecall.net/topic/59328-help-please/
crawl-003
refinedweb
127
83.56
This is the BuildBot manual. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. --- The Detailed Node Listing --- Introduction Installation Troubleshooting Concepts Version Control Systems Users Configuration Getting Source Code Changes Change Sources Build Process Build Steps Simple ShellCommand Subclasses Source Checkout Build Factories BuildFactory Process-Specific build factories Status Delivery to do the right thing. When everyone can see the status of the project, developers are encouraged to keep the tree in good working order.. Both the buildmaster and the buildslaves require Twisted-1.3.0 or later. They have been briefly tested against Twisted-1.2.0, and might even work with Twisted-1.1.0, but 1.3.0 is the version that has received the most testing. They run against Twisted-2.0.0 as well, albeit with a number of warnings about the use of deprecated features. If you use Twisted-2.0, you'll need at least "Twisted" (the core package), and you'll also want TwistedMail, TwistedWeb, and TwistedWords (for sending email, serving a web status page, and delivering build status via IRC, respectively).. The Buildbot is installed using the standard python distutils module.. To test this, shift to a different directory (like /tmp), and run: pydoc buildbot If it shows you a brief description of the package and its contents, the install went ok. If it says no Python documentation found for 'buildbot', then something went wrong. Windows users will find these files in other places. You will need to make sure that python can find the libraries, and will probably find it convenient to have buildbot on your PATH. If you wish, you can run the buildbot unit test suite like this: PYTHONPATH=. trial -v buildbot.test This should run up to 109 tests, depending upon what VC tools you have installed. On my desktop machine it takes about two minutes to complete. 
Nothing should fail, a few might be skipped. If any of the tests fail, you should stop and investigate the cause before continuing the installation process, as it will probably be easier to track down the bug early. If you want to test the VC checkout process, you'll need to install a tarball of repositories, available from . Otherwise there are about 8 tests which will be skipped (all with names like testSVN and testArchHTTP). If you unpack this tarball in ~/tmp, it will create ~/tmp/buildbot-test-vc-1, and you can enable the extra tests with: PYTHONPATH=. BUILDBOT_TEST_VC=~/tmp trial -v buildbot.test If you cannot or do not wish to install the buildbot into a site-wide location like /usr or /usr/local, you can also install it into the account's home directory. Do the install command like this: python setup.py install --home=~ That will populate ~/lib/python and create ~/bin/buildbot. Make sure this lib directory is on your PYTHONPATH. As you learned earlier (see System Architecture), the buildmaster runs on a central host (usually one that is publically visible, so everybody can check on the status of the project), and controls all aspects of the buildbot system. Let us call this host buildbot.example.org. You may wish to create a separate user account for the buildmaster, perhaps named buildmaster. This can help keep your personal configuration distinct from that of the buildmaster and is useful if you have to use a mail-based notification system (see Change Sources). However, the Buildbot will work just fine with your regular user account. You need to choose a directory for the buildmaster, called the basedir. This directory will be owned by the buildmaster, which will use configuration files therein, and create status files as it runs. ~/Buildbot is a likely value. If you run multiple buildmasters in the same account, or if you run both masters and slaves, you may want a more distinctive name like ~/Buildbot/master/gnomovision or ~/Buildmasters/fooproject. 
Once you've picked a directory, use the buildbot master command to create the directory and populate it with startup files: buildbot master basedir You will need to create a configuration file (see Configuration). In addition to buildbot.tac, a small Makefile.sample is installed. This can be used as the basis for customize daemon startup, See Launching the daemons.. Follow the instructions given earlier (see Installing the code). If you use a separate buildslave account, and you didn't install the buildbot code to a shared location, then you will need to install it with --home=~ for each account that needs it.. This should be somewhere in the buildslave's account, typically named after the project which is being tested. The buildslave will not touch any file outside of this directory. Something like ~/Buildbot or ~/Buildslaves/fooproject is appropriate. When the buildbot admin configures the buildmaster to accept and use your buildslave, they will provide you with the following pieces of information: Now run the 'buildbot' command as follows: buildbot slave BASEDIR MASTERHOST:PORT SLAVENAME PASSWORD This will create the base directory and a collection of files inside, including the buildbot.tac file that contains all the information you passed to the buildbot command.. If you run many buildslaves, you may want to create ~buildslave/info and share it among all the buildslaves with symlinks. Both the buildmaster and the buildslave run as daemon programs. To launch them, pass the working directory to the buildbot command: buildbot start BASEDIR: @reboot buildbot BASEDIR There is also experimental support for sysvinit-style /etc/init.d/buildbot startup scripts. debian/buildbot.init and debian/buildbot.default may be useful to look.. To stop a buildmaster or buildslave manually, use: buildbot stop. 
The following shortcut is available: buildbot sighup BASEDIR {} \; Here are a few hints on diagnosing common problems.bot stop BASEDIR ; buildbot start BASEDIR will speed up the process. From the buildmaster's main status web page, you can force a build to be run on your build slave. Figure out which column is for a builder that runs on your slave, click on that builder's name, and the page that comes up will have a “Force Build” button. Fill in the form, hit the button, and a moment later you should see your slave's twistd.log filling with commands being run. Using pstree or top should also reveal the cvs/make/gcc/etc processes being run by the buildslave. Note that the same web page should also show the admin and host information files that you configured earlier. This chapter defines some of the basic concepts that the Buildbot uses. You'll need to understand how the Buildbot sees the world to configure it properly. These source trees come from a Version Control System of some kind. CVS and Subversion are two popular ones, but the Buildbot supports others. All VC systems1 have some notion of an upstream repository which acts as a server, from which clients can obtain source trees according to various parameters. The VC repository provides source trees of various projects, for different branches, and from various points in time. The first thing we have to do is to specify which source tree we want to get.. Each time someone commits a change to the project, a new revision becomes available. These revisions can be described by a tuple with two items: the first is a branch tag, and the second is some kind of timestamp or revision stamp. Arch, or a labeled tag used in CVS)2. The SHA1 revision ID used by Monotone is also a kind of revision stamp, in that it specifies a unique copy of the source tree. 
When we aren't intending to make any changes to the sources we check out (at least not any that need to be committed back upstream), there are two basic ways to use a VC system: Build personnel or CM staff typically use the first approach: the build that results is (ideally) completely specified by the two parameters given to the VC system: repository and revision tag. This gives QA and end-users something concrete to point at when reporting bugs. others that occurred later. We assume that the Changes arrive at the buildbot (through one of the mechanisms described in see Change Sources) in the same order in which they are committed to the repository. The Buildbot waits for the tree to become stable before initiating a build, for two reasons. The first is that developers frequently commit several changes in a group, even when the VC system provides ways to make atomic transactions involving multiple files at the same time. Running a build in the middle of these sets of changes would use an inconsistent set of source files, and is likely to fail. The tree-stable-timer is intended to avoid these useless builds that include some of the developer's changes but not all. The second reason is that some VC systems (i.e. CVS) do not provide repository-wide transaction numbers, such Builders3. For CVS, the static specifications are repository, module, and branch tag (which defaults to HEAD). In addition to those, each build uses a timestamp (or omits the timestamp to mean the latest).. Arch specifies a repository by URL, as well as a version which is kind of like a branch name. Arch uses the word archive to represent the repository. Arch lets you push changes from one archive to another, removing the strict centralization required by CVS and SVN. It seems to retain., would use a fully-qualified Arch ID, which looks like an email address). 
Each StatusNotifier will map the who attribute into something appropriate for their particular means of communication: an email address, an IRC handle, etc. step.Source class. revisionis an int, seconds since the epoch revisionis an int, a transation number (r%d) revision revisionis a string, ending in –patch-%d revisionis an int, the transaction number The Change might also have a branch attribute. This is primarily intended to represent the CVS named branch (since CVS does not embed the branch in the pathname like many of the other systems), however it could be used for other purposes as well (e.g. some VC systems might allow commits to be marked as cosmetic, or docs-only, or something). The Build, in its isBranchImportant method, gets to decide whether the branch is important or not. This allows you to configure Builds which only fire on changes to a specific branch. For a change to trigger a build, both the branch must be important, and at least one of the files inside the change must be considered important. What is a Change?. Each Change has a single User who is responsible for. The buildbot.status.mail.MailNotifier class having an unrelated hostname),. Like MailNotifier, the buildbot.status.words.IRC class provides a status target which can announce the results of each build. It also provides an interactive interface by responding to online queries posted in the channel or sent as private messages. The buildbot can be configured map User names to IRC nicknames, to watch for the recent presence of these nicknames, and to deliver build status messages to the interested parties. Like MailNotifier does for IRC object. The Buildbot also offers a build”). The buildbot's behavior is defined by the “config file”, which normally lives in the master.cfg file in the buildmaster's base directory (but this can be changed with an option to buildbot master). 
This file completely specifies which Builders are to be run, which slaves they should use, how Changes should be tracked, and where the status information is to be sent. The buildmaster's minumum set of actions necessary to bring the buildbot up to date: Builders which are not changed are left untouched, and Builders which are modified get to keep their old event history.: basedir os.path.expanduser(os.path.join(basedir, 'master.cfg')) sighup BASEDIR). There are a couple of basic settings that you use to tell the buildbot what project it is working on. This information is used by status reporters to let users find out more about the codebase being exercised by this particular Buildbot installation. c['projectName'] = "Buildbot" c['projectURL'] = "" c['buildbotURL'] = "" projectName is a short string will be used to describe the project that this buildbot is working on. For example, it is used as the title of the waterfall HTML page. projectURL is a string that gives a URL for the project as a whole. HTML status displays will show projectName as a link to projectURL, to provide a link from buildbot HTML pages to your project's home page. The buildbotURL string should point to the location where the buildbot's internal web server (usually the html.Waterfall page) is visible. This typically uses the port number set when you create the Waterfall object, the buildbot needs your help to figure out a suitable externally-visible host name.. The c['sources'] key is a list of ChangeSource instances4. This defines how the buildmaster learns about source code changes. More information about what goes here is available in See Getting Source Code Changes. c['sources'] = [buildbot.changes.pb.PBChangeSource()]. c['slavePortnum'] = 10000'), ] The slavenames must be unique, of course. The password exists to prevent evildoers from interfering with the buildbot by inserting their own (broken) buildslaves into the system. 
Buildslaves with an unrecognized slavename or a non-matching password will be rejected when they attempt to connect. The['bots']list. Each buildslave can accomodate multiple Builders. builddir factory buildbot.process.factory.BuildFactoryinstance which controls both when the build is run, and how it is performed. Full details appear in their own chapter, See Build Process. Parameters like the location of the CVS repository and the compile-time options used for the build are generally provided as arguments to the factory's constructor. Other optional keys may be set on each Builder: periodicBuildTime periodicBuildTimeseconds. This may be useful when you first get started using the buildbot, before you have a Change Source configured. E.g., if you want a buildbot which just recompiles the tree every hour, set this to 60*60. category c['interlocks'] is a list of Interlock specifications. Each one is a 3-tuple of interlock-name, feeder-list, and watcher-list. The interlock name is a unique string which distinguishes one interlock from another. The feeder-list is a list of strings which name the Builders that this interlock depends upon: the interlock will not <q>open</q> for any given Change until all those Builders have completed successfully. The watcher-list is a list of strings which name the Builders that wait for this interlock to open. Those Builders will not run until their Interlocks' feeding Builders have passed. This feature is scheduled to be decomposed into a more useful pair: Dependency and Lock. The first indicates that one build depends upon another completing correctly, while the second indicates two builds that may not be run at the same time. Lock are useful when running multiple Builders on the same (slow) buildslave, where running the builds in parallel would cause thrashing.=["#example"])) Status delivery has its own chapter, See Status Delivery, in which all the built-in status targets are documented. 
If you set c['debugPassword'], then you can connect to the buildmaster with the diagnostic tool launched by buildbot debugclient MASTER:PORT. From this tool, you can reload the config file, manually force builds, and inject changes, which may be useful for testing your buildmaster without actually commiting changes to your repository (or before you have the Change Sources set up). The debug tool uses the same port number as the slaves do: c['slavePortnum'], and is authenticated with this password. c['debugPassword'] = "debugpassword" If you set c['manhole'] to an instance of the buildbot.master.Manhole class, you can telnet into the buildmaster and get an interactive Python shell, which may be useful for debugging buildbot internals. It is probably only useful for buildbot developers. from buildbot.master import Manhole c['manhole'] = Manhole(9999, "admin", "password"). A Version Control System mantains a source tree, and tells the buildmaster when it changes. The first step of each Build is typically to acquire a copy of some version of this tree. This chapter describes how the Buildbot thinks about source trees and Changes, and how it learns about them. Each Buildmaster watches a single source tree. Changes can be provided by a variety of ChangeSource types, however any given project will typically have only a single ChangeSource active. This section provides a list of ChangeSource types and descriptions of how to set them up. Each source tree has a nominal top. Each Change has a list of filenames, which are all relative to this top location. The ChangeSource is responsible for doing whatever is necessary to accomplish this. This generally involves a Prefix: a partial pathname which is stripped from the front of all filenames provided to the ChangeSource. 
Files which are outside this sub-tree are ignored by the ChangeSource: it does not generate Changes for those files.'] = [s] The CVSToys package provides a server which runs on the machine that hosts the CVS repository it watches. It has a variety of ways to distribute commit notifications, and offers a flexible regexp-based way to filter out uninteresting changes. One of the notification options is named PBService and works by listening on a TCP port for clients. These clients subscribe to hear about commit notifications. The buildmaster has a PBService client built in. There are two versions of it, one for old versions of CVSToys (1.0.9 and earlier) which used the oldcred authentication framework, and one for newer versions (1.0.10 and later) which use newcred. Both are classes in the buildbot.changes.freshcvs package. FreshCVSSourceNewcred objects are created with the following parameters: hostand port userand passwd freshcvs). These must match the server's values, which are defined in the freshCfgconfiguration file (which lives in the CVSROOT directory of the repository). prefix CVSToys also provides a MailNotification action which will send email to a list of recipients for each commit. This tends to work better than using /bin/mail from within the CVSROOT/loginfo file directly, as CVSToys will batch together all files changed during the same CVS invocation, and can provide more information (like creating a ViewCVS URL for each file changed). The Buildbot's FCMaildirSource. The last kind of ChangeSource actually listens on a TCP port for clients to connect and push change notices into the Buildmaster. This is used by the Subversion notification tool (in contrib/svn_buildbot.py), which is run by the SVN server and connects to the buildmaster directly. This is also useful for creating new kinds of change sources that work on a push model instead of some kind of subscription scheme, for example a script which is run out of a .forward file. 
This ChangeSource can be configured to listen on its own TCP port, or it can share the port that the buildmaster is already using for the buildslaves to connect. (This is possible because the PBChangeSource uses the same protocol as the buildslaves, and they can be distinguished by the username attribute used when the initial connection is established). It might be useful to have it listen on a different port it, for example, you wanted to establish different firewall rules for that port. You could allow only the SVN repository machine access to the PBChangeSource port, while allowing only the buildslave machines access to the slave port. Or you could just expose one port and run everything over it. Note: this feature is not yet implemented, the PBChangeSource will always share the slave port and will always have a user name of change, and a passwd of changepw. These limitations will be removed in the future.. The PBChangeSource is created with the following arguments: port None(which is the default), it shares the port used for buildslave connections. Not Implemented, always set to None. userand passwd changeand changepw. useris currently always set to change, passwdis always set to changepw. prefix This section lists all the standard BuildStep objects available for use in a Build, and the parameters which can be used to control each. The standard Build (described in the <a href="factories.xhtml">BuildFactory</a> docs) runs a series of BuildSteps in order, only stopping if one of them requests that the build be halted. It collects status information from each one to create an overall build status (of SUCCESS, WARNINGS, or FAILURE). All BuildSteps accept some common parameters to control how their individual status affects the overall build. 
Arguments common to all BuildStep subclasses: name haltOnFailure flunkOnWarnings flunkOnFailure warnOnWarnings warnOnFailure This is a useful base class for just about everything you might want to do during a build (except for the initial source checkout). It runs a single command in a child shell on the build slave. All stdout/stderr is recorded into a LogFile. The step. All ShellCommands are run by default in the “workdir”, which defaults to the build subdirectory of the slave builder's base directory. The absolute path of the workdir will thus be the slave's basedir (set as an option to mktap) plus the builder's basedir (set in the builder Several subclasses of ShellCommand are provided as starting points for common build steps. These are all very simple: they just override a few parameters so you don't have to specify them yourself, making the master.cfg file less verbose. This is intended to handle the ./configure step from autoconf-style projects, or the perl Makefile.PL step from perl MakeMaker.pm-style modules. The default command is ./configure but you can change this by providing a command= parameter. This is meant to handle compiling or building a project written in C. The default command is make all. When the compile is finished, the log file is scanned for GCC error/warning messages and a summary log is created with any problems that were seen (TODO: the summary is not yet created). This is meant to handle unit tests. The default command is make test, and the warnOnFailure flag is set. The first step of any build is typically to acquire the source code from which the build will be performed. There are several classes to handle this, each for a different source control system. For a description of how Buildbot treats source control in general, take a look at the <a href="source.xhtml">Getting Sources</a> documentation. All source checkout steps accept some common parameters to control how they get the sources and where they should be placed. 
The remaining per. The CVS build step performs a CVS checkout or update. It takes the following arguments: cvsroot :pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot cvsmodule module, which is generally a subdirectory of the CVSROOT. The cvsmodule for the Buildbot source code is buildbot. branch -rargument. This is most useful for specifying a branch to work on. Defaults to HEAD. global_options -Doption. Defaults to half of the parent Build's treeStableTimer. The SVN build step performs a Subversion checkout or update. It takes the following arguments. svnurl URLargument that will be given to the svn checkoutcommand. It dictates both where the repository is located and which sub-tree should be extracted. In this respect, it is like a combination of the CVS cvsrootand cvsmodulearguments. For example, if you are using a remote Subversion repository which is accessible through HTTP at a URL of, and you wanted to check out the trunk/calcsub-tree, you would use svnurl=""as an argument to your SVNstep. The Darcs build step performs a Darcs checkout or update. It takes the following arguments: repourl The Arch build step performs an Arch checkout or update using the tla client. It takes the following arguments: url version archive Bazaarstep, below. Bazaar is an alternate implementation of the Arch VC system, which uses a client named baz. The checkout semantics are just different enough from tla that there is a separate BuildStep for it. It takes exactly the same arguments as Arch, except that the archive= parameter is required. (baz does not emit the archive name when you do baz register-archive, so we must provide it ourselves). The P4Sync build step performs a Perforce update. It is a temporary facility: a more complete P4 checkout step (named P4) will eventually replace it. This step requires significant manual setup on each build slave. It takes the following arguments. 
p4port Each Builder is equipped with a <q>build factory</q>, which is responsible for producing the actual Build objects that perform each build. This factory is created in the configuration file, and attached to a Builder through the 'factory' element of its dictionary. The standard BuildFactory object creates Build objects by default. These Builds will each execute a collection of BuildSteps in a fixed sequence. Each step can affect the results of the build, but in general there is little intelligence to tie the different steps together. You can create subclasses of Build to implement more sophisticated build processes, and then use a subclass of BuildFactory. The default BuildFactory, provided in the buildbot.process.factory module,. flunkOnFailureor flunkOnWarningsflag is set, then a result of FAILURE or WARNINGS will. warnOnFailureor warnOnWarningsflag is set, then a result of FAILURE or WARNINGS will mark the build as having WARNINGS, and the remaining steps will still be executed. This may be appropriate for certain kinds of optional build or test steps. For example, a failure experienced while building documentation files should be made visible with a WARNINGS result/process/step.py to see how the other Steps are marked. Each Step is created with an additional workdir argument that indicates where its actions should take place. This is specified as a subdirectory of the slave builder's base directory, with a default value of build. This is only implemented as a step argument (as opposed to simply being a part of the base directory) because the CVS/SVN steps need to perform their checkouts from the parent directory. Several attributes from the BuildFactory are copied into each Build. treeStableTimer perl perlexecutable to use. Defaults to just perl. python pythonexecutable to use. Defaults to just python. test: More details are available in the docstrings for each class, use pydoc buildbot.status.html.Waterfall to see them. 
Most status delivery objects take a categories= argument, which can contain a list of “category” names: in this case, it will only show status for Builders that are in one of the named categories. (implementor's note: each of these objects should be a service.MultiService which will be attached to the BuildMaster object when the configuration is processed. They should use self.parent.getStatus() to get access to the top-level IStatus object.). The Buildbot's home page is at For configuration questions and general discussion, please use the buildbot-devel mailing list. The subscription instructions and archives are available at [1] except Darcs, but since the Buildbot never changes] although this [4] To be precise, it is a list of objects which all implement the buildbot.interfaces.IChangeSource Interface
http://docs.buildbot.net/0.6.6/
CC-MAIN-2018-34
refinedweb
4,550
64.1
15 October 2008 23:15 [Source: ICIS news] “Second-gen cellulosic bioethanol will reduce CO2 [carbon dioxide] emissions by about 90%,” said Thomas Videbaek, executive vice president, BioBusiness, Novozymes, at a meeting of the Société de Chimie Industrielle. In 2007, biofuel production replaced 1m bbl/day of crude oil, he said. Bioethanol reduces CO2 emissions by 30-70% compared with gasoline, he said. “We save 30 kilograms of CO2 for every 1 kilogram of enzyme used,” Videbaek said. That translates to about 20m tonnes/year of CO2 emissions reduction, he said. Videbaek said this was just one example of how Novozymes uses biotechnology to affect the chemical industry. Novozymes has also teamed up with US-based Cargill to develop commercial solutions for bio-acrylic acid. “If successful, we calculate a full-scale bio-acrylic acid plant that can produce 160,000 tonnes/year,” he said. The company will still need to deal with a host of difficult challenges, but Videbaek said Novozymes is ready. One challenge is that pricing of renewable feedstocks can cause an increase in the raw material cost structure. Another challenge is surpassing the standards of traditional chemistry, he said. Chemicals today are already low cost and considered optimal performers, he said.
http://www.icis.com/Articles/2008/10/15/9164108/Novozymes-to-launch-2nd-generation-biofuels.html
CC-MAIN-2013-48
refinedweb
207
54.93
0 Hello, I am taking a computer programming class this semester and working on a project. I am trying to read a text file into a 2D list. The text file contains ten rows and ten columns of numbers, either a 0,1,or 2. When i try to read the file it adds the numbers on to the end of the list...this is what i have def getfromfile(): infile = input("Enter the name of the file with the original state of the network: ") infile = open(infile,"r") OLD =[ [" "]*COLUMN for i in range(ROW)] for line in infile: for i in range(ROW): OLD.append(i) for j in range(COLUMN): OLD.append(j) for i in range(ROW): for j in range(COLUMN): if i == 0: OLD[i][j] = DEAD elif i == 1: OLD[i][j] = CLEAN else: OLD[i][j] = INFECTED The whole idea is that the list is a network of computers that are infected. OLD is the list of computers that is the network and as each time cycle passes, more computers get infected...i am just having a problem getting the "original state" from the text file... THANKS!!!
https://www.daniweb.com/programming/software-development/threads/398951/python-school-project-program-help
CC-MAIN-2018-34
refinedweb
195
77.37
By default, HTTPBuilder classes will automatically parse XML responses using an XmlSlurper. You can try the following example in the Groovy console (Groovy 1.6+ is needed for the @Grab macro): @Grab(group='org.codehaus.groovy.modules.http-builder', module='http-builder', version='0.7') import groovyx.net.http.HTTPBuilder def http = new HTTPBuilder( '' ) http.get( path: 'weather', query:[q: 'London', mode: 'xml'] ) { resp, xml -> println resp.status println "It is currently ${xml.weather.@value.text()} in London." println "The temperature is ${xml.temperature.@value.text()} degrees Kelvin" } HTTPBuilder will automatically detect the content-type (assuming the sends the correct response header) and parse the response as XML. It is not necessary (but optional) to explicitly specify the contentType: ContentType.XML parameter. HTML response data will also be parsed automatically, by using NekoHTML which corrects the XML stream before it is passed to the XmlSlurper. The resulting behavior is that you can parse HTML as if it was well-formed XML. Keep in mind that particularly when parsing HTML documents, they often refer to external DTDs. The required behavior of all JAXP XML parsers is to retrieve and parse any referenced entities (e.g. DTD, schema, etc.) every time the document is processed (yes, even if validation is disabled.) This can become costly when the referenced entity document never changes. To avoid the overhead of downloading and parsing externally referenced documents for every request, the HTTPBuilder's built-in XML and HTML parser uses an XML Catalog to store a local copy of frequently used DTD and entity definitions. You can add additional entity files to the default parser's catalog as well. XML data is serialized using StreamingMarkupBuilder. You can define the body property as a closure like so: http.request( POST, XML ) { body = { auth { user 'Bob' password 'pass' } } } The body is then transformed to an XML string by EncoderRegistry.encodeXML(). 
Alternatively, the XML body may be passed as a raw string as well. Another common request is "What if I want to display the raw XML rather than parse it?" In order to do that, you're going to send a contentType parameter, to force HTTPBuilder (or RESTClient) to use the TEXT parser. However, since setting the contentType also affects the Accept request header, we might need to override that as well. For Example: import groovyx.net.http.RESTClient import static groovyx.net.http.ContentType.* def weather = new RESTClient( '' ) def resp = weather.get( path: 'weather', query:[q: 'London', mode: 'xml'], contentType: TEXT, headers : [Accept : 'application/xml'] ) println resp.data.text // print the XML Furthermore, you can use HTTPBuilder's defaults to reduce the number of parameters passed to each request method like so: def weather = new RESTClient( '' ) weather.contentType = TEXT weather.headers = [Accept : 'application/xml'] def resp = weather.get( path: 'weather', query:[q: 'London', mode: 'xml'])
http://groovy.codehaus.org/modules/http-builder/doc/xml.html
CC-MAIN-2014-15
refinedweb
468
51.24
I have a result set that might look like this: ID (no column name) anotherID ---- ---------------- ---------- 1 super 3 1 super 4 3 duper 6 4 really 7 4 really 8 I have 2 issues: First: How do I use dapper with a column with no name? Second: I want to have a parent child relationship such that I get 3 objects each with a list of anotherID's for example: public class MyObject { public int ID public string Name public int[] Children } Well, un-named columns are not supported by dapper. I never really saw a reason for them. I guess we could build support for: class Foo { [ColumnNumber(1)] public string Name {get;set;} } The trouble is that it introduces a very fragile method of querying which I strongly dislike, passing a directive to Query is just as clunky. However, if you are happy to change the way you grab the results you could work around this. var grid = QueryMultiple(@"set nocount on declare @t table(Id int, Name nvarchar(max), AnotherId int) insert @t exec proc set nocount off select Id, Name from @t select Id, AnotherId from @t "); Then use the technique here to multi map: Multi-Mapper to create object hierarchy
https://dapper-tutorial.net/knowledge-base/7326375/dapper-dot-net--no-column-name-
CC-MAIN-2019-04
refinedweb
205
55.1
You're reading the documentation for an older, but still supported, version of ROS 2. For information on the latest version, please have a look at Galactic. ROS 2 developer guide Table of Contents This page defines the practices and policies we employ when developing ROS 2. General Principles Some principles are common to all ROS 2 development: Shared ownership: Everybody working on ROS 2 should feel ownership over all parts of the system. The original author of a chunk of code does not have any special permission or obligation to control or maintain that chunk of code. Everyone is free to propose changes anywhere, to handle any type of ticket, and to review any pull request. Be willing to work on anything: As a corollary to shared ownership, everybody should be willing to take on any available task and contribute to any aspect of the system. Ask for help: If you run into trouble on something, ask your fellow developers for help, via tickets, comments, or email, as appropriate. Quality Practices Packages can ascribe to different levels of quality based on the development practices they adhere to, as per the guidelines in REP 2004: Package Quality Categories. The categories are differentiated by their policies on versioning, testing, documentation, and more. The following sections are the specific development rules we follow to ensure core packages are of the highest quality (‘Level 1’). We recommend all ROS developers strive to adhere to the following policies to ensure quality across the ROS ecosystem. Versioning We will use the Semantic Versioning guidelines ( semver) for versioning. We will also adhere to some ROS-specific rules built on top of semver's full meaning: Major version increments (i.e. breaking changes) should not be made within a released ROS distribution. Patch (interface-preserving) and minor (non-breaking) version increments do not break compatibility, so these sorts of changes are allowed within a release. 
Major ROS releases are the best time to release breaking changes. If a core package needs multiple breaking changes, they should be merged into their integration branch (e.g. master) to allow catching problems in CI quickly, but released together to reduce the number of major releases for ROS users. Though major increments require a new distribution, a new distribution does not necessarily require a major bump (if development and release can happen without breaking API). For compiled code, the ABI is considered part of the public interface. Any change that requires recompiling dependent code is considered major (breaking). ABI breaking changes can be made in a minor version bump before a distribution release (getting added to the rolling release). We enforce API stability for core packages in Dashing and Eloquent even though their major version components are 0, despite SemVer’s specification regarding initial development. Subsequently, packages should strive to reach a mature state and increase to version 1.0.0so to match semver'sspecifications. Caveats These rules are best-effort. In unlikely, extreme cases, it may be necessary to break API within a major version/distribution. Whether an unplanned break increments the major or minor version will be assessed on a case-by-case basis. For example, consider a situation involving released X-turtle, corresponding to major version 1.0.0, and released Y-turtle, corresponding to major version 2.0.0. If an API-breaking fix is identified to be absolutely necessary in X-turtle, bumping to 2.0.0 is obviously not an option because 2.0.0 already exists. The solutions for handling X-turtle’s version in such a case, both non-ideal, are: Bumping X-turtle’s minor version: non-ideal because it violates SemVer’s principle that breaking changes must bump the major version. 
Bumping X-turtle’s major version past Y-turtle (to 3.0.0): non-ideal because the older distro’s version would become higher than the already-available version of a newer distro, which would invalidate/break version-specific conditional code. The developer will have to decide which solution to use, or more importantly, which principle they are willing to break. We cannot suggest one or the other, but in either case we do require that explicit measures be taken to communicate the disruption and its explanation to users manually (beyond just the version increment). If there were no Y-turtle, even though the fix would technically just be a patch, X-turtle would have to bump to 2.0.0. This case adheres to SemVer, but breaks from our own rule that major increments should not be introduced in a released distribution. This is why we consider the versioning rules best-effort. As unlikely as the examples above are, it is important to accurately define our versioning system. Public API declaration According to semver, every package must clearly declare a public API. We will use the “Public API Declaration” section of the quality declaration of a package to declare what symbols are part of the public API. For most C and C++ packages the declaration is any header that it installs. However, it is acceptable to define a set of symbols which are considered private. Avoiding private symbols in headers can help with ABI stability, but is not required. For other languages like Python, a public API must be explicitly defined, so that it is clear what symbols can be relied on with respect to the versioning guidelines. The public API can also be extended to build artifacts like configuration variables, CMake config files, etc. as well as executables and command-line options and output. Any elements of the public API should be clearly stated in the package’s documentation. 
If something you are using is not explicitly listed as part of the public API in the package’s documentation, then you cannot depend on it not changing between minor or patch versions. Deprecation strategy Where possible, we will also use the tick-tock deprecation and migration strategy for major version increments. New deprecations will come in a new distribution release, accompanied by compiler warnings expressing that the functionality is being deprecated. In the next release, the functionality will be completely removed (no warnings). Example of function foo deprecated and replaced by function bar: We must not add deprecations after a distribution is released. Deprecations do not necessarily require a major version bump, though. A deprecation can be introduced in a minor version bump if the bump happens before the distro is released (similar to ABI breaking changes). For example, if X-turtle begins development as 2.0.0, a deprecation can be added in 2.1.0 before X-turtle is released. We will attempt to maintain compatibility across distros as much as possible. However, like the caveats associated with SemVer, tick-tock or even deprecation in general may be impossible to completely adhere to in certain cases. Change control process All changes must go through a pull request. We will enforce the Developer Certificate of Origin (DCO) on pull requests in ROSCore repositories. It requires all commit messages to contain the Signed-off-byline with an email address that matches the commit author. You can pass -s/ --signoffto the git commitinvocation or write the expected message manually (e.g. Signed-off-by: Your Name Developer <your.name@example.com>). DCO is not required for pull requests that only address whitespace removal, typo correction, and other trivial changes. Always run CI jobs for all tier 1 platforms for every pull request and include links to jobs in the pull request. (If you don’t have access to the Jenkins jobs someone will trigger the jobs for you.) 
A minimum of 1 approval from a fellow developer who did not author the pull request is required to consider it approved. Approval is required before merging. Packages may choose to increase this number. Any required changes to documentation (API documentation, feature documentation, release notes, etc.) must be proposed before merging related changes. Guidelines for backporting PRs When changing an older version of ROS: Make sure the features or fixes are accepted and merged in the master branch before opening a PR to backport the changes to older versions. When backporting to older versions, also consider backporting to any other still supported versions, even non-LTS versions. If you are backporting a single PR in its entirety, title the backport PR “[Distro] <name of original PR>”. If backporting a subset of changes from one or multiple PRs, the title should be “[Distro] <description of changes>”. Link to all PRs whose changes you’re backporting from the description of your backport PR. In a Dashing backport of a Foxy change, you do not need to link to the Eloquent backport of the same change. Documentation All packages should have these documentation elements present in their README or linked to from their README: Description and purpose Definition and description of the public API Examples How to build and install (should reference external tools/workflows) How to build and run tests How to build documentation How to develop (useful for describing things like python setup.py develop) License and copyright statements Each source file must have a license and copyright statement, checked with an automated linter. Each package must have a LICENSE file, typically the Apache 2.0 license, unless the package has an existing permissive license (e.g. rviz uses three-clause BSD). Each package should describe itself and its purpose assuming, as much as possible, that the reader has stumbled onto it without previous knowledge of ROS or other related projects. 
Each package should define and describe its public API so that there is a reasonable expectation for users about what is covered by the semantic versioning policy. Even in C and C++, where the public API can be enforced by API and ABI checking, it is a good opportunity to describe the layout of the code and the function of each part of the code. It should be easy to take any package and from that package’s documentation understand how to build, run, build and run tests, and build the documentation. Obviously we should avoid repeating ourselves for common workflows, like building a package in a workspace, but the basic workflows should be either described or referenced. Finally, it should include any documentation for developers. This might include workflows for testing the code using something like python setup.py develop, or it might mean describing how to make use of extension points provided by your package. Examples: capabilities: This one gives an example of docs which describe the public API catkin_tools: This is an example of describing an extension point for a package (API docs are not yet being automatically generated) Testing All packages should have some level of system, integration, and/or unit tests. Unit tests should always be in the package which is being tested and should make use of tools like Mock to try and test narrow parts of the code base in constructed scenarios. Unit tests should not bring in test dependencies that are not testing tools, e.g. gtest, nosetest, pytest, mock, etc… Integration tests can test interactions between parts of the code or between parts of the code and the system. They often test software interfaces in ways that we expect the user to use them. Like Unit tests, Integration tests should be in the package which is being tested and should not bring in non-tool test dependencies unless absolutely necessary, i.e. all non-tool dependencies should only be allowed under extreme scrutiny so they should be avoided if possible. 
System tests are designed to test end-to-end situations between packages and should be in their own packages to avoid bloating or coupling packages and to avoid circular dependencies. In general minimizing external or cross package test dependencies should be avoided to prevent circular dependencies and tightly coupled test packages. All packages should have some unit tests and possibly integration tests, but the degree to which they should have them is based on the package’s quality category. The following subsections apply to ‘Level 1’ packages: Code coverage We will provide line coverage, and achieve line coverage above 95%. If a lower percentage target is justifiable, it must be prominently documented. We may provide branch coverage, or exclude code from coverage (test code, debug code, etc.). We require that coverage increase or stay the same before merging a change, but it may be acceptable to make a change that decreases code coverage with proper justification (e.g. deleting code that was previously covered can cause the percentage to drop). Performance We strongly recommend performance tests, but recognize they don’t make sense for some packages. If there are performance tests, we will choose to either check each change or before each release or both. We will also require justification for merging a change or making a release that lowers performance. Linters and static analysis We will use ROS code style and enforce it with linters from ament_lint_common. All linters/static analysis that are part of ament_lint_common must be used. The ament_lint_auto documentation provides information on running ament_lint_common. General Practices Some practices are common to all ROS 2 development. These practices don’t affect package quality level as described in REP 2004, but are still highly recommended for the development process. Issues When filing an issue please make sure to: Include enough information for another person to understand the issue. 
In ROS 2, the following points are needed for narrowing down the cause of an issue. Testing with as many alternatives in each category as feasible will be especially helpful. The operating system and version. Reasoning: ROS 2 supports multiple platforms, and some bugs are specific to particular versions of operating systems/compilers. The installation method. Reasoning: Some issues only manifest if ROS 2 has been installed from “fat archives” or from Debians. This can help us determine if the issue is with the packaging process. The specific version of ROS 2. Reasoning: Some bugs may be present in a particular ROS 2 release and later fixed. It is important to know if your installation includes these fixes. The DDS/RMW implementation being used (see this page for how to determine which one). Reasoning: Communication issues may be specific to the underlying ROS middleware being used. The ROS 2 client library being used. Reasoning: This helps us narrow down the layer in the stack at which the issue might be. Include a list of steps to reproduce the issue. In case of a bug consider to provide a short, self contained, correct (compilable), example. Issues are much more likely to be resolved if others can reproduce them easily. Mention troubleshooting steps that have been tried already, including: Upgrading to the latest version of the code, which may include bug fixes that have not been released yet. See this section and follow the instructions to get the “master” branches. Trying with a different RMW implementation. See this page for how to do that. Branches Note These are just guidelines. It is up to the package maintainer to choose branch names that match their own workflow. It is good practice to have separate branches in a package’s source repository for each ROS distribution it is targeting. These branches are typically named after the distribution they target. For example, a humble branch for development targeted specifically at the Humble distribution. 
Releases are also made from these branches, targeting the appropriate distribution. Development targeted at a specific ROS distribution can happen on the appropriate branch. For example: Development commits targeting foxy are made to the foxy branch, and package releases for foxy are made from that same branch. Note This requires the package maintainers to perform backports or forwardports as appropriate to keep all branches up to date with features. The maintainers must also perform general maintenance (bug fixes, etc.) on all branches from which package releases are still made. For example, if a feature is merged into the Rolling-specific branch (e.g. rolling or main), and that feature is also appropriate to the Galactic distribution (does not break API, etc.), then it is good practice to backport the feature to the Galactic-specific branch. The maintainers may make releases for those older distributions if there are new features or bug fixes available. What about main and rolling ? main typically targets Rolling (and so, the next unreleased ROS distribution), though the maintainers may decide to develop and release from a rolling branch instead. Pull requests A pull request should only focus on one change. Separate changes should go into separate pull requests. See GitHub’s guide to writing the perfect pull request. A patch should be minimal in size and avoid any kind of unnecessary changes. A pull request must contain minimum number of meaningful commits. You can create new commits while the pull request is under review. Before merging a pull request all changes should be squashed into a small number of semantic commits to keep the history clear. But avoid squashing commits while a pull request is under review. Your reviewers might not notice that you made the change, thereby introducing potential for confusion. Plus, you’re going to squash before merging anyway; there’s no benefit to doing it early. 
Any developer is welcome to review and approve a pull request (see General Principles). When you start reviewing a pull request, comment on the pull request so that other developers know that you’re reviewing it. Pull-request review is not read-only, with the reviewer making comments and then waiting for the author to address them. As a reviewer, feel free to make minor improvements (typos, style issues, etc.) in-place. As the opener of a pull-request, if you are working in a fork, checking the box to allow edits from upstream contributors will assist with the aforementioned. As a reviewer, also feel free to make more substantial improvements, but consider putting them in a separate branch (either mention the new branch in a comment, or open another pull request from the new branch to the original branch). Any developer (the author, the reviewer, or somebody else) can merge any approved pull request. Library versioning We will version all libraries within a package together. This means that libraries inherit their version from the package. This keeps library and package versions from diverging and shares reasoning with the policy of releasing packages which share a repository together. If you need libraries to have different versions then consider splitting them into different packages. Development process The default branch (in most cases the master branch) must always build, pass all tests and compile without warnings. If at any time there is a regression it is the top priority to restore at least the previous state. Always build with tests enabled. Always run tests locally after changes and before proposing them in a pull request. Besides using automated tests, also run the modified code path manually to ensure that the patch works as intended. Always run CI jobs for all platforms for every pull request and include links to the jobs in the pull request. For more details on recommended software development workflow, see Software Development Lifecycle section. 
Changes to RMW API When updating RMW API, it is required that RMW implementations for the Tier 1 middleware libraries are updated as well. For example, a new function rmw_foo() introduced to the RMW API must be implemented in the following packages (as of ROS Foxy): Updates for non-Tier 1 middleware libraries should also be considered if feasible (e.g. depending on the size of the change). See REP-2000 for the list of middleware libraries and their tiers. Tracking tasks To help organize work on ROS 2, the core ROS 2 development team uses kanban-style GitHub project boards. Not all issues and pull requests are tracked on the project boards, however. A board usually represents an upcoming release or specific project. Tickets can be browsed on a per-repo basis by browsing the ROS 2 repositories’ individual issue pages. The names and purposes of columns in any given ROS 2 project board vary, but typically follow the same general structure: To do: Issues that are relevant to the project, ready to be assigned In progress: Active pull requests on which work is currently in progress In review: Pull requests where work is complete and ready for review, and for those currently under active review Done: Pull requests and related issues are merged/closed (for informational purposes) To request permission to make changes, simply comment on the tickets you’re interested in. Depending on the complexity, it might be useful to describe how you plan to address it. We will update the status (if you don’t have the permission) and you can start working on a pull request. If you contribute regularly we will likely just grant you permission to manage the labels etc. yourself. Programming conventions Defensive programming: ensure that assumptions are held as early as possible. E.g. check every return code and make sure to at least throw an exception until the case is handled more gracefully. All error messages must be directed to stderr. Declare variables in the narrowest scope possible. 
Keep group of items (dependencies, imports, includes, etc.) ordered alphabetically. C++ specific Avoid using direct streaming (e.g. std::cout) to stdout / stderr to prevent interleaving between multiple threads. Avoid using references for std::shared_ptr since that subverts the reference counting. If the original instance goes out of scope and the reference is being used it accesses freed memory. Filesystem layout The filesystem layout of packages and repositories should follow the same conventions in order to provide a consistent experience for users browsing our source code. Package layout src: contains all C and C++ code Also contains C/C++ headers which are not installed include: contains all C and C++ headers which are installed <package name>: for all C and C++ installed headers they should be folder namespaced by the package name <package_name>: contains all Python code test: contains all automated tests and test data config: contains configuration files, e.g. YAML parameters files and RViz config files doc: contains all the documentation launch: contains all launch files package.xml: as defined by REP-0140 (may be updated for prototyping) CMakeLists.txt: only ROS packages which use CMake setup.py: only ROS packages which use Python code only README: can be rendered on GitHub as a landing page for the project This can be as short or detailed as is convenient, but it should at least link to project documentation Consider putting a CI or code coverage tag in this README It can also be .rst or anything else that GitHub supports CONTRIBUTING: describes the contribution guidelines This might include license implication, e.g. when using the Apache 2 License. LICENSE: a copy of the license or licenses for this package CHANGELOG.rst: REP-0132 compliant changelog Developer Workflow We track open tickets and active PRs related to upcoming releases and larger projects using GitHub project boards.
The usual workflow is: Discuss design (GitHub ticket on the appropriate repository, and a design PR to ros2/design if needed) Write implementation on a feature branch on a fork (if you are not a ROS 2 committer, you don’t have access to the CI farm. In that case, ping the reviewer of your PR to run CI for you) If your use case requires running code coverage: Go to ci.ros2.org Log in (top right corner) Click on the ci_linux_coverage job Click “Build with Parameters” (left column) Be sure of leaving “CI_BUILD_ARGS” and “CI_TEST_ARGS” with the default values Hit the build button At the end of the document there are instructions on how to interpret the result of the report and calculate the coverage rate If the CI job built without warnings, errors and test failures, post the links of your jobs on your PR or high-level ticket aggregating all your PRs (see example here) Note that the markdown for these badges is in the console output of the ci_launcher job Architectural Development Practices This section describes the ideal lifecycle that should be employed when making large architectural changes to ROS 2. Software Development Lifecycle This section describes step-by-step how to plan, design, and implement a new feature: Task Creation Creating the Design Document Design Review Implementation Code Review Task creation Tasks requiring changes to critical parts of ROS 2 should have design reviews during early stages of the release cycle. If a design review is happening in the later stages, the changes will be part of a future release. An issue should be created in the appropriate ros2 repository, clearly describing the task being worked on. Writing the design document Design docs must never include confidential information. Whether or not a design document is required for your change depends on how big the task is.
You are making a small change or fixing a bug: - A design document is not required, but an issue should be opened in the appropriate repository to track the work and avoid duplication of efforts. You are implementing a new feature or would like to contribute to OSRF-owned infrastructure (like Jenkins CI): - Design doc is required and should be contributed to ros2/design to be made accessible on https://design.ros2.org/ - You should fork the repository and submit a pull request detailing the design. Mention the related ros2 issue (for example, Design doc for task ros2/ros2#<issue id>) in the pull request or the commit message. Detailed instructions are on the ROS 2 Contribute page. Design comments will be made directly on the pull request. If the task is planned to be released with a specific version of ROS, this information should be included in the pull request. Design document review Once the design is ready for review, a pull request should be opened and appropriate reviewers should be assigned. It is recommended to include project owner(s) - maintainers of all impacted packages (as defined by package.xml maintainer field, see REP-140) - as reviewers. If the design doc is complex or reviewers have conflicting schedules, an optional design review meeting can be set up.
In this case, Before the meeting Send a meeting invite at least one week in advance Meeting duration of one hour is recommended Meeting invite should list all decisions to be made during the review (decisions requiring package maintainer approval) - Meeting required attendees: design pull request reviewers Meeting optional attendees: all OSRF engineers, if applicable During the meeting The task owner drives the meeting, presents their ideas and manages discussions to ensure an agreement is reached on time After the meeting The task owner should send back meeting notes to all attendees If minor issues have been raised about the design: The task owner should update the design doc pull request based on the feedback Additional review is not required If major issues have been raised about the design: It is acceptable to remove sections for which there is no clear agreement The debatable parts of the design can be resubmitted as a separate task in the future If removing the debatable parts is not an option, work directly with package owners to reach an agreement Once consensus is reached: Ensure the ros2/design pull request has been merged, if applicable Update and close the GitHub issue associated with this design task Implementation Before starting, go through the Pull requests section for best practices. For each repo to be modified: Modify the code, go to the next step if finished or at regular intervals to backup your work. Self-review your changes using git add -i. Create a new signed commit using git commit -s. A pull request should contain minimal semantically meaningful commits (for instance, a large number of 1-line commits is not acceptable). Create new fixup commits while iterating on feedback, or optionally, amend existing commits using git commit --amendif you don’t want to create a new commit every time. Each commit must have a properly written, meaningful, commit message. More instructions here. 
Moving files must be done in a separate commit, otherwise git may fail to accurately track the file history. Either the pull request description or the commit message must contain a reference to the related ros2 issue, so it gets automatically closed when the pull request is merged. See this doc for more details. Push the new commits. Code review Once the change is ready for code review: Open a pull request for each modified repository. Remember to follow Pull requests best practices. GitHub can be used to create pull requests from the command-line. If the task is planned to be released with a specific version of ROS, this information should be included in each pull request. Package owners who reviewed the design document should be mentioned in the pull request. Code review SLO: although reviewing pull requests is best-effort, it is helpful to have reviewers comment on pull requests within a week and code authors to reply back to comments within a week, so there is no loss of context. Iterate on feedback as usual, amend and update the development branch as needed. Once the PR is approved, package maintainers will merge the changes in. Build Farm Introduction The build farm targets the following platforms: Ubuntu 20.04 Focal (amd64, aarch64), macOS 10.14 Mojave (amd64), and Windows 10 (amd64). There are several job types, including ci_linux_coverage: build + test + generation of test coverage. Repeated jobs (flakiness hunter): nightly_linux_repeated nightly_linux-aarch64_repeated nightly_osx_repeated nightly_win_repeated Coverage: nightly_linux_coverage: build + test the code + analyses coverage for c/c++ and python results are exported as a cobertura report packaging (run every night; result is bundled into an archive): packaging_linux packaging_osx packaging_windows Two additional build farms support the ROS / ROS 2 ecosystem by providing building of source and binary packages, continuous integration, testing, and analysis. For details, frequently asked questions, and troubleshooting see build farms.
Note on Coverage runs ROS 2 packages are organized in a way that the testing code for a given package is not only contained within the package, but could also be present in a different package. In other words: packages can exercise code belonging to other packages during the testing phase. To achieve the coverage rate reached by all code available in the ROS 2 core packages it is recommended to run builds using a fixed set of proposed repositories. That set is defined in the default parameters of coverage jobs in Jenkins. How to read the coverage rate from the buildfarm report To see the coverage report for a given package: When the ci_linux_coverage build finishes, click on Coverage Report Scroll down to the Coverage Breakdown by Package table In the table, look at the first column called “Name” The coverage reports in the buildfarm include all the packages that were used in the ROS workspace. The coverage report includes different paths corresponding to the same package: Name entries with the form: src.*.<repository_name>.<package_name>.* These correspond to the unit test runs available in a package against its own source code Name entries with the form: build.<repository_name>.<package_name>.* These correspond to the unit test runs available in a package against its files generated at building or configuring time Name entries with the form: install.<package_name>.* These correspond to the system/integration tests coming from testing runs of other packages How to calculate the coverage rate from the buildfarm report Get the combined unit coverage rate using the automatic script: - From the ci_linux_coverage Jenkins build copy the URL of the build - Download the get_coverage_ros2_pkg script - Execute the script: ./get_coverage_ros2_pkg.py <jenkins_build_url> <ros2_package_name> (README) - Grab the results from the “Combined unit testing” final line in the output of the script Alternative: get the combined unit coverage rate from coverage report (require manual
calculation): When the ci_linux_coverage build finishes, click on Cobertura Coverage Report Scroll down to the Coverage Breakdown by Packagetable In the table, under the first column “Name”, look for (where <package_name> is your package under testing): all the directories under the pattern src.*.<repository_name>.<package_name>.*grab the two absolute values in the column “Lines”. all the directories under the pattern build/.<repository_name>.*grab the two absolute values in the column “Lines”. With the previous selection: for each cell, the first value is the lines tested and the second is the total lines of code. Aggregate all rows for getting the total of the lines tested and the total of lines of code under test. Divide to get the coverage rate. How to measure coverage locally using lcov (Ubuntu) To measure coverage on your own machine, install lcov. sudo apt install -y lcov The rest of this section assumes you are working from your colcon workspace. Compile in debug with coverage flags. Feel free to use colcon flags to target specific packages. colcon build --cmake-args -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} --coverage" -DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} --coverage" lcov requires an initial baseline, which you can produce with the following command. Update the output file location for your needs. lcov --no-external --capture --initial --directory . --output-file ~/ros2_base.info Run tests for the packages that matter for your coverage measurements. For example, if measuring rclcpp also with test_rclcpp colcon test --packages-select rclcpp test_rclcpp Capture the lcov results with a similar command this time dropping the --initial flag. lcov --no-external --capture --directory . --output-file ~/ros2.info Combine the trace .info files: lcov --add-tracefile ~/ros2_base.info --add-tracefile ~/ros2.info --output-file ~/ros2_coverage.info Generate html for easy visualization and annotation of covered lines. 
mkdir -p coverage genhtml ~/ros2_coverage.info --output-directory coverage
https://docs.ros.org/en/foxy/Contributing/Developer-Guide.html
CC-MAIN-2022-21
refinedweb
5,582
52.6
This document is intended to give a thorough view of the design and implementation of the Google Data Scala client library. If you want to know more about how this library works, or contribute to the project, this is the starting point. It assumes the reader is familiar with Scala, the Google Data API protocol, Atom and the Atom Publishing Protocol. Note: The Scala client library needs a working Scala installation newer than 2.7.1. It also works with the current 2.8.0-beta. This section describes the steps to take if you want to start developing the library. It describes how to checkout the sources, build and run the tests. Everything is straight forward, so most readers used to Scala development can skip this section and come back if needed. Before you start, make sure you have installed the following software: Follow the instructions on the project website to checkout a working copy. Then cd into the directory and type $ ant build Make sure the environment variable SCALA_HOME points to your Scala installation. To build and run the tests, type $ ant test The build process can be configured by setting the appropriate values in build.properties. If you have installed emma, make sure you update the properties file to point to its installation directory: emma.dir=/usr/local/soft/emma-2.0.5312/lib Now you can get an html report of test coverage: $ ant coverage The Scala client library handles XML serialization, HTTP connection, authentication and query building for Google Data. The library is centered around serializing and deserializing XML, and provides a core set of classes called Picklers, on which all the serialization code is based. The object model is decoupled from serialization code, and the library provides classes for Atom and Google Data common elements. Supported services, like YouTube or Calendar, define new model classes and picklers, using exactly the same approach as the core library. 
Users that need to extend the library (for a new service, or to accommodate an extension to an existing service) can use the same approach. The library provides support for making queries and updating data through HTTP. The approach is straight forward, and similar to the road taken by the Java client library. Users connect to Google services through Service objects, that provide methods for making queries and updating data on the server. Queries are encoded as specific URLs, and the Query builder provides a natural syntax for expressing complex queries. These are the objects that are most visible to the users of this library. The library goes to great lengths to provide a type-safe layer on top of the underlying XML protocol. Whenever possible, Scala data types are used to represent the underlying data. Here is a high level view of the conventions followed throughout the library, that should be followed by all extensions: Along with the core classes for XML serialization (picklers), there are classes that model the various data used by the Google Data protocol (like feeds, contacts, calendar entries, etc) and HTTP connection classes. The library is split in packages along the following lines: The serialization code is built around pickler combinators. A pickler is an object that can serialize and deserialize some type. The library provides implementations for base cases, and combinators for assembling more sophisticated picklers out of simple ones. This should strike a familiar note if you've ever used the combinator parsing library in the Scala distribution. Pickler combinators are implemented in com.google.xml.combinators. The Pickler interface needs just two methods: abstract class Pickler[A] { def pickle(v: A, in: XmlOutputStore): XmlOutputStore def unpickle(in: XmlInputStore): PicklerResult[A] } The input and output types are abstractions over the XML representation. 
The result type of the unpickle method is a PicklerResult, which can be either Success or Error. All errors are issued through PicklerResult values, as this allows combinators to decide later if an error should fail the whole pickler, or try another alternative. The library provides picklers for primitive types. It is interesting to note that they do not commit to an attribute or an element content. Combinators will decide that when they wrap one of the basic picklers. Combinators are functions that take as arguments one or more picklers, and return a pickler for a more complex data type. Sometimes they return a pickler for the same data type as their argument, but perform some processing on input, like attr and elem. To keep the exposition clear, descriptions of picklers will talk from the point of view of unpickling (parsing the input), but keep in mind that the pickling part is always implied: All this has been pretty abstract, so now we turn to a simple example. Let's say we need to write a pickler for Google rating elements, with the following schema: rating = element gd:rating { gdCommonProperties, attribute rel { xs:string }?, attribute value { xs:int }?, attribute average { xs:float }?, attribute min { xs:int }, attribute max { xs:int }, attribute numRaters { xs:int }? } We start by defining a class for ratings: case class Rating(average: Option[Double], min: Int, max: Int, numRaters: Option[Int], rel: String, value: Option[Int]) { } We follow the conventions and turn all elements and attributes into fields with the same name. Optional elements get an Option type. 
Next we need to define a pickler for ratings in the companion object: object Rating { import Uris.gdNs import Picklers._ def pickler: Pickler[Rating] = elem("rating", opt(attr("average", doubleVal)) ~ attr("min", intVal) ~ attr("max", intVal) ~ opt(attr("numRaters", intVal)) ~ default(attr("rel", text), "overall") ~ opt(attr("value", intVal)))(gdNs) } The pickler definition reads almost like an RNG schema for the rating element. The default combinator provides a value in case the attribute rel is not present. The last argument to elem is the element namespace (in this case, the Google data namespace). If you try to compile this code, you'll notice there is a type error: The return type of pickler is not Pickler[Rating], but some complex type involving ~. The reason has to do with the sequence combinator, which returns picklers for a pair-like type formed by the two picklers: def seq[A, B](pa: => Pickler[A], pb: => Pickler[B]): Pickler[~[A, B]] As in the combinator parsing library, ~ is both a convenience method in trait Pickler, and a holder class. To fix this error, we need the wrap combinator. We can fix our code by giving the library a way to transform between our type and the type it understands. The wrap combinator does just that: Given a pickler for some type A, and two functions g: A => B and f: B => A, it gives back a pickler for type B. def wrap[A, B](pb: => Pickler[A])(g: A => B)(f: B => A): Pickler[B] def rawPickler = // the previous definition def pickler: Pickler[Rating] = wrap (rawPickler) { case avg ~ min ~ max ~ numRaters ~ rel ~ value => Rating(avg, min, max, numRaters, rel, value) } (fromRating) private def fromRating(r: Rating) = (new ~(r.average, r.min) ~ r.max ~ r.numRaters ~ r.rel ~ r.value) Thanks to type inference, we got away easy: all types are filled in by the compiler.
The first argument to wrap is the raw pickler, the second one is a function (using Scala's support for patterns as partial functions) that constructs Rating objects out of the pair-like structure. The last argument breaks a Rating object into pairs. Oh, and one more thing: because Rating is a case class, we can use the automatically generated functions to get a much cleaner definition: def rawPickler = // as before def pickler: Pickler[Rating] = wrap (rawPickler) (Rating.apply) (Rating.unapply) This code uses implicit conversions behind the scenes to adapt the given functions to the expected types. There is one sad thing, though: ticket #508. For the moment, the unapply method cannot be used. The Google Data protocol is highly extensible. Most elements can be extended with new attributes or elements, and the library provides a solution based on picklers. The basic idea is to collect any unparsed content of an element and store it as XML. An extension is then just another pickler, which combined with an extensible element operates on the collected data. Here's an example involving the Atom link element. case class Link(href: String, rel: Option[String], tpe: Option[String], hrefLang: Option[String], title: Option[String], length: Option[String]) extends HasStore object Link { implicit val nsAtom = Uris.atomNs val contentsPickler: Pickler[Link] = wrap(attr("href", text) ~ opt(attr("rel", text)) ~ opt(attr("type", text)) ~ opt(attr("hrefLang", text)) ~ opt(attr("title", text)) ~ opt(attr("length", text))) (Link.apply) (toPair) lazy val pickler: Pickler[Link] = elem("link", makeExtensible(contentsPickler)) The interesting thing to note is the call to makeExtensible on the contentsPickler. This combinator simply stores whatever was not parsed by the given pickler into a field of the Link class. Note that Link extends HasStore, a trait that declares a store field for that purpose. 
Remember that, even though we talk only about parsing, the store goes both ways: when pickling a Link, all unknown elements are pickled too. Suppose link elements are extended with a child element called webContent: extend(pickler, elem("webContent", text)(Uris.gCalNs)) This returns a pickler that handles the additional element. This scheme works fine for simple cases, but when a class contains objects of a class that was extended, this scheme will lose type information: the container will refer to its members by a super type. This is the case with feeds and entries, and the solution is described in a dedicated section. The Atom syndication protocol defines common constructs and the basic structure of feeds and entries. Their model classes and picklers are found under com.google.gdata.data, and follow the pattern described above with one exception: feeds and entries, which are described in the next section. The library uses a custom DateTime class, whose definition and picklers are found in com.google.gdata.data.util. This class handles time zones and parses dates in the RFC 3339 format. Google defines a number of common classes, and their implementation is found in package com.google.gdata.data.kinds. Implementation is straight forward. The interesting cases are FeedLink and EntryLink, which are special because they might enclose a feed or entry element, and are described below. Feeds are at the center of the Google Data API. Each kind of data that is published by a service is represented as a feed. A feed contains entries along with metadata (such as author, id, or publish date). In turn, each entry represents a specific kind of data, like videos, events, messages. As such, entries are by far the most extended structure in Google Data. Going back to the extensibility issue, when modeling Atom feeds, we are faced with the choice of a type for entries. Our first attempt might look like this: class AtomFeed { var author: String ... 
var entries: List[AtomEntry] } But what happens when we implement YouTube video feeds? We will extend AtomEntry to define VideoEntry, but the feed will still 'know' only about AtomEntry, and user code would need to down cast. Worse, feeds are often interconnected: a video entry has a comments feed, a related video feed and a user profile feed. All these have different type of entries, who can in turn be extended later. Redefining each feed for each extension (and the transitive closure of its uses) is clearly not a scalable solution. In turn, we abstract over the type of entries, and let feeds and entries evolve independently, combining them using mixin composition. The cake pattern is used when different components need to abstract over their dependencies, evolve independently and do so in a type safe way. In our case, feeds and entries are the components that need to inter-operate, but the dependencies should not be hard coded in either of them. We start by defining a trait for Entries: trait Entries { type Entry <: HasStore def entryPickler: Pickler[Entry] = elem("entry", makeExtensible(entryContentsPickler))(Uris.atomNs) def entryContentsPickler: Pickler[Entry] } This component provides an abstract type Entry: all components using entries use this abstract type when referring to an entry. This allows them to work with different implementations of Entries. It also provides a pickler for this abstract type, expressed in terms of a pickler for entry contents. The entryContentsPickler is a method that needs to be defined by concrete implementations, and which should remain abstract until the type is fixed to a concrete type. Next we look at a component for feeds. 
Since feeds depend on entries, we'll use a self type annotation to express this requirement: trait Feeds { this: Feeds with Entries => type Feed <: Seq[Entry] with HasStore def feedPickler: Pickler[Feed] = elem("feed", makeExtensible(feedContentsPickler))(Uris.atomNs) def feedContentsPickler: Pickler[Feed] This component is very similar to the previous one, except for the self type annotation, that reads like 'all instances that mix in Feeds should also mix in Entries'. This allows Feeds to be defined in terms of the abstract type Entry, for instance by making them implement Seq[Entry]. Once we have defined feed and entry components, we can refine them to model the most basic feeds: atom feeds. At each step, we need to refine the abstract type and define a contents pickler for the new bound: trait AtomEntries extends Entries { type Entry <: AtomEntry class AtomEntry extends AnyRef with LinkNavigation with HasStore { var authors: List[Person] = Nil var categories: List[Category] = Nil // ... } lazy val atomEntryContentsPickler: Pickler[AtomEntry] = wrap (...) ({ case authors ~ cats ~ .. => (new AtomEntry).fillOwnFields(authors, cats, content, contribs, id, links, published, rights, src, summary, title, updated) }) (fromEntry) } This new component provides a more specific type of entries, and defines the AtomEntry class along with its pickler. Note that the contents pickler method is not implemented yet: instead, a atomEntryContentsPickler is provided. This allows future extensions, that can reuse the atom entry pickler. Similarly, atom feeds follow: trait AtomFeeds extends Feeds { this: AtomFeeds with Entries => type Feed <: AtomFeed with HasStore class AtomFeed extends AnyRef with Seq[Entry] with HasStore { var authors: List[Person] = Nil // .. var entries: List[Entry] = Nil } lazy val atomFeedContentsPickler: Pickler[AtomFeed] = wrap (interleaved(rep(atomPerson("author")) ~ rep(entryPickler))) ({ case authors ~ entries => new AtomFeed(...) 
}) (fromAtomFeed) } Notice how the pickler is using the abstract method entryPickler to handle the abstract Entry type. To bring everything together, we need to come up with a concrete class that can be instantiated. This means fixing the abstract types, and their picklers. This is usually done at the point of use, as after types are concrete, no further refinement is possible. We choose to define named classes for all feeds, as it is very likely to use the same feed in more than one place. The name should be the name of the feed type, prefixed by Std: class StdAtomFeed extends AtomFeeds with AtomEntries { type Feed = AtomFeed type Entry = AtomEntry def feedContentsPickler = atomFeedContentsPickler def entryContentsPickler = atomEntryContentsPickler } Now we can instantiate standard atom feeds and use the types and picklers. For example, the following code is unpickling a feed from a file: val atomFeed = new StdAtomFeed val is = new FileInputStream(...) val xmlStore = LinearStore.fromInputStream(is) atomFeed.feedPickler.unpickle(xmlStore) match { case Success(feed, _) => println('Unpickled feed: ' + feed) case f: NoSuccess => println(f.toString) } The unpickle method takes an XmlInputStore as parameter, so we need to create one based on the input stream. Then we match on the result, to check if the parsing was successful or not. In this section we lay out the pattern for refining entries and feeds. This should be the way new feeds are added to the library. The goals are to have a consistent feel, and limit the amount of code a subclass needs to write in order to reuse super class picklers. During this section we'll talk about entries, but keep in mind that the same pattern applies to feeds as well. All new entries should subclass AtomEntries and give a more specific upper bound to the abstract type of entry. They should also provide a pickler, named after the entry type plus the suffix ContentsPickler, implemented in terms of the superclass pickler. 
trait VideoEntries extends AtomEntries { type Entry <: VideoEntry class VideoEntry extends AtomEntry { var noembed: Boolean = false var restricted: Boolean = false // .. } def videoEntryContentsPickler: Pickler[VideoEntry] = wrap (atomEntryContentsPickler ~ videoEntryExtra) ({ case ae ~ (noembed ~ restricted) => val me = new VideoEntry me.fromAtomEntry(ae) me.fillOwnFields(noembed, restricted) }) (fromVideoEntry) //... } In this example we have assumed the extra fields of video entries have been gathered in their own pickler videoEntryExtra, but this is not always necessary. The pickler is using the atomEntryContentsPickler to parse everything the superclass may contain, and wrapping the contents pickler to instantiate video entries. The interesting bit is the following two lines, which fill the fields of video entries: These two methods should be implemented by all entries. Let's go back to the definition of VideoEntry and add the necessary methods: class VideoEntry extends AtomEntry { // .. def fromVideoEntry(me: VideoEntry) { this.fromAtomEntry(me) fillOwnFields(me.media, me.noembed, me.restricted, me.viewCount, me.rating, me.comments) } def fillOwnFields(noembed: Boolean, restricted: Boolean): this.type = { this.noembed = noembed this.restricted = restricted this } // .. } Notice how the fromVideoEntry method is implemented in terms of fromAtomEntry and fillOwnFields (in our previous definition of AtomEntry we have glossed over the implementation of these two methods). This has the nice effect that any extension requires code that is proportional in size to the delta the extension introduces. You can check how that works in PlaylistVideoEntries, which extends video entries even further. To sum it up, a new refinement of Entries should: If the entry is part of a new feed, Many times a feed or an entry references another feed. For instance, video entries need to refer to comments, which have a feed of their own. 
The way to refer to comment feeds is to declare an abstract value of the feed type, and use the Feed and Entry types defined by that value. trait VideoEntries { val commentsFeed: StdAtomFeed // ... val comments: commentsFeed.Feed } This declaration makes it explicit that video entries depend on a comment feed implementation. Clients of video entries will need to provide a concrete implementation for commentsFeed. Most likely this will be in a Service implementation, and such an implementation will want to make sure the same comments component is used by all components in the service. To achieve that, it needs to override the comments value with a singleton type. class YouTubeService { val videos = new StdVideoFeed { override lazy val commentsFeed: YouTubeService.this.comments.type = comments } val comments = new StdCommentsFeed //.. } This code says to the compiler that the value comments and videos.commentsFeed are the same, and therefore the Entry type in comments is the same as the Entry type in videos.commentsFeed. Without this wiring, the compiler would think that comments.Feed and videos.commentsFeed.Feed are different types, as the paths are different. Google defines two elements, feedLink and entryLink, which represent either the link to a feed, or an embedded feed (we talk only about feeds, but the same applies to entries). Because they might enclose a feed, their pickler needs to be able to handle an arbitrary embedded feed. They simply abstract over the type and pickler for the embedded feed. class FeedLink[Feed] extends HasStore { // .. } object FeedLink { def contentsPickler[F](feedPickler: Pickler[F]): Pickler[FeedLink[F]] } Google Data implements queries using GET requests to a specific URL. The library provides a class for building URLs using a nice syntax. Queries have two components: a category part, and a search part. Categories are introduced by a forward slash, while the search part is introduced by matching.
They may have a number of modifiers, like maxResults or orderBy, to further control the result set. The base class Query defines the standard query syntax and handles the encoding of parameters into a URL. Supported services define subclasses of Query to handle new, specific parameters. To build a query that matches several categories, start with a Query object and add categories separated by /. This method is defined in the Query class and takes a CategoryQuery. To make more complex queries, you can use | and ! to build alternatives and negation, respectively. These methods are defined in the CategoryQuery class. One obtains the URL by passing a base URL to mkUrl: scala> var q1 = Query.empty / "Comedy" / "Fun" scala> q1.mkUrl("") res6: String = scala> q1 = Query.empty / (cat("Comedy") | cat("Fun")) scala> q1.mkUrl("") res5: String = To make a text query, start with a Query object and call matching, passing an instance of SearchQuery. Search queries are built using &, |, ! and Text instances, with the usual meaning. For instance, the following query is translated to an URL by passing a base URL: scala> q1 = Query.empty matching Text("fun") & !Text("office") scala> q1.mkUrl("") res13: String = In addition to category and search queries, one can control the result set by adding modifiers. Modifiers are additional parameters that get shipped with a query, and control the number of results or ordering. This last example shows a query having all parts described above: scala> q1 = Query.empty / "Comedy" matching Text("fun") & !Text("office") maxResults(10) scala> q1.mkUrl("") res15: String = Most services accept additional parameters. Supported services get a specialized query class that implements such new features. 
All new parameters should be implemented in terms of Query.addParam, for instance the YouTubeQuery adds an 'orderBy' parameter: class YouTubeQuery extends Query { def orderBy(ordering: String): this.type = addParam("orderby", ordering) } The library defines helper classes for making Google Data requests and handling authentication. The Service class holds everything together: The request factory creates requests and handles common headers that should be added to each request. The Service class supports stateful services, like calendar, by handling redirects with a gsessionid parameter by saving it and shipping it with future queries. It also provides generic query methods that use picklers for handling request/response content. Any pickling errors are translated to exceptions at this point. For example, the YouTubeService class provides convenient methods for accessing video feeds by hiding the concrete picklers and URLs to which queries are made: class YouTubeService { val videos = new StdVideoFeed { override lazy val commentsFeed: YouTubeService.this.comments.type = comments } // .. /** Return a video feed matching the given query. */ def getVideos(q: Query): videos.Feed = { query(q.mkUrl(YouTubeService.BASE_VIDEO_FEED), videos.feedPickler) } } To support a new service, one needs to All these additional classes should go into a service-specific package below com.google.gdata. The library has a collection of unit tests. They are organized in the same packages as the classes they test, but live under tests/ instead of src/. Unit tests are written using junit 4. Most tests are straight forward, but feed tests are more interesting. Each feed test unpickles a saved feed under test-data, then pickles it back and uses xml-test to check that the resulting XML document matches the input. This ensures no input elements are lost. To write a test for a new feed, get an XML document retrieved from the server, by using curl for instance. 
Save it under test-data/feeds, then write a new test class that extends FeedFileTest. class YouTubeFeedsTest extends AnyRef with FeedFileTest { @Test def testVideoFeed { testRoundtrip("feeds/video-feed-in.xml", (new StdVideoFeed).feedPickler, "//rating") } //... } testRoundtrip takes the input file, the pickler and any number of XPath elements that should be ignored when comparing.
http://code.google.com/p/gdata-scala-client/wiki/DevelopersGuide
crawl-003
refinedweb
3,904
54.52
jedidja.ca Better Living Through Blitting Our cooking/coding/kick-ass little group, Freshly Coded, has a graphics wizard: Nick. He is the magic behind all the nice UI and crazy-fast graphics you’ll see over on our joints project page. Normally I stick to developer-driven testing, refactoring / architecture, and regular coding but this time I’m going to try doing some graphics coding myself … everyone cross their fingers. It may be easier to forget what you know about WPF and just start “fresh” with the Windows 8 Xaml classes. We still have WriteableBitmap (there’s no InteropBitmap) but its usage is a bit strange. Further, it’s not immediately obvious how you can write to the buffer. Finally, here are our starting conditions: - Each pixel on the screen is represented by 32 bits (or 4 bytes) in BGRA format. - Each cell is a 5x5 pixel square. - The entire game is a grid of 96x96 cell. Probably the best place to begin is with CellMapDisplay. It needs to change from using a Canvas and Rectangles to a WriteableBitmap that’s connected to an on-screen Image. Along with the WriteableBitmap, we need a buffer to store the pixels we're going to display and some helper methods to draw the cells themselves. 
// Renders the game-of-life grid into a raw BGRA byte buffer and blits it to the
// screen through a WriteableBitmap bound to a XAML Image. Each pixel is 4 bytes
// (B, G, R, A); each cell is drawn as a cellSize x cellSize square of pixels.
public class CellMapDisplay {
    // Back-buffer bitmap; its PixelBuffer is overwritten wholesale in UpdateScreen.
    private readonly WriteableBitmap bitmap;
    private readonly uint numberCellsAcross;
    private readonly uint numberCellsDown;
    // Side length of one cell, in pixels.
    private readonly int cellSize;
    // Raw BGRA pixel buffer (4 bytes per pixel) that DrawCell writes into.
    public byte[] cells;
    // Byte length of one full row of cells, i.e. cellSize pixel rows of
    // (numberCellsAcross * cellSize) pixels at 4 bytes each.
    private int bytesPerCellLine;

    // Sizes the bitmap to the whole grid, points the given Image at it, and
    // initializes every pixel to opaque black (B=G=R=0, A=0xFF).
    public CellMapDisplay(Image image, uint numberCellsAcross, uint numberCellsDown, int cellSize) {
        bitmap = new WriteableBitmap(
            (int)numberCellsAcross * cellSize,
            (int)numberCellsDown * cellSize);
        image.Source = bitmap;
        this.numberCellsAcross = numberCellsAcross;
        this.numberCellsDown = numberCellsDown;
        this.cellSize = cellSize;
        bytesPerCellLine = (int)numberCellsAcross * cellSize * cellSize * 4;
        cells = new byte[bytesPerCellLine * numberCellsDown];
        for (int x = 0; x < cells.Length; x += 4) {
            cells[x] = 0;        // blue
            cells[x + 1] = 0;    // green
            cells[x + 2] = 0;    // red
            cells[x + 3] = 0xff; // alpha: fully opaque
        }
    }

    // Paints one cell's square of pixels into the buffer only; the screen is not
    // touched until UpdateScreen is called. x and y are CELL coordinates, not pixels.
    // NOTE(review): an "on" cell writes channel value 1 (RGB 1,1,1), which is visually
    // indistinguishable from the 0,0,0 background — 0xFF looks intended; confirm
    // against the original rendering before relying on this.
    public void DrawCell(uint x, uint y, bool on) {
        byte value = (byte)(on ? 1 : 0);
        // Byte offset of the cell's top-left pixel within the buffer.
        var lineLeft = bytesPerCellLine * y + (x * cellSize * 4);
        for (int celly = 0; celly < cellSize; celly++) {
            for (int cellx = 0; cellx < cellSize; cellx++) {
                var pixel = lineLeft + (cellx * 4);
                cells[pixel] = value;
                cells[pixel + 1] = value;
                cells[pixel + 2] = value;
                cells[pixel + 3] = 0xFF; // keep alpha opaque
            }
            // Advance one pixel row: the bitmap's full width in bytes.
            lineLeft += (int)numberCellsAcross * cellSize * 4;
        }
    }

    // Copies the whole buffer into the bitmap's pixel stream and invalidates it so
    // XAML redraws the Image. async void: fire-and-forget, event-handler style.
    public async void UpdateScreen() {
        using (var stream = bitmap.PixelBuffer.AsStream()) {
            await stream.WriteAsync(cells, 0, cells.Length);
        }
        bitmap.Invalidate();
    }
}
In case you're trying this and wonder why bitmap.PixelBuffer.AsStream() doesn't seem to compile, the extension method AsStream() resides in System.Runtime.InteropServices.WindowsRuntime — something that isn't immediately obvious in the documentation. The changes to CellMap (call UpdateScreen at the start of NextGeneration) and Image rather than the Canvas) are straightforward and we are ready to run again. And ... wow! This is an order of magnitude faster. What's even funnier (?) is now the drawing is so fast that it's the same order of magnitude as updating our generation counter.
I think we’re in a pretty good state to start exploring Windows 8-specific code so I’ll wrap up this section (for now). Next time, we tackle Live Tiles. (Code so far)
http://www.jedidja.ca/better-living-through-blitting/
CC-MAIN-2018-05
refinedweb
514
56.35
Eikon Data API Start with the Eikon Data API for Python in 4 steps - Run the Eikon application - Create an App Key for your application - Install the Python library for Eikon Data API - Let's have some fun ! 1. Run the Eikon application The Eikon application integrates a Data API proxy that acts as an interface between the Eikon Data API Python library and the Eikon Data Platform. For this reason, the Eikon application must be running when you use the Eikon Data API Python library. When launched, the Eikon application starts listening for local websocket connections on port 9000 or the next available port if port 9000 is already occupied. You need to have a valid Eikon user account to be able to launch the Eikon application. 2. Create an App Key for your application Every application using the Eikon Data API must identify itself thanks to a Key called Application Key or App Key for short. This Key, that is a unique identifier for your application, must be created using the App Key Generator Eikon app that you can find in the Eikon App Library or via the toolbar of Eikon. Here is how to run it and how to create an App Key for a new application: - In the Eikon toolbar type "App Key" and run the App Key Generator app: - In the App Key Generator app indicate the name of your new application, select the APIs it uses and click on the "Register New App" button: - Read the "Terms & Conditions" and click on the "Accept" button - Copy the App Key that you will use later in your new application: 3. Install the Python library for the Eikon Data API The Eikon Data API Python library is an ease of use library, which conveniently wraps the raw message transcription between Eikon and Python and provides user friendly data retrieval calls. The data output from the Eikon Data API Python library is available as Pandas DataFrames or as JSON objects. 
Note: if you do not have a Python environment, please refer to the Setting up a Python development environment tutorial to set one up before following the next steps. Run the following command to install the Eikon package from the Python Package Index (PyPI): pip install eikon Note: This command must be run at the prompt of the command-line interpreter (CMD). If the "Script/" folder of your Python installation is not included in your PATH environment variable, you may even have to navigate to this folder before you run the pip command. Below is an example of the expected output C:\> C:\>cd Python36-32\Scripts C:\Python36-32\Scripts>pip install eikon Collecting eikon Using cached eikon-0.1.11-py3-none-any.whl Collecting websocket-client (from eikon) Using cached websocket_client-0.46.0-py2.py3-none-any.whl ... Installing collected packages: six, websocket-client, zope.interface, pytz, date time, appdirs, numpy, python-dateutil, pandas, certifi, chardet, idna, urllib3, requests, eikon Successfully installed appdirs-1.4.3 certifi-2018.1.18 chardet-3.0.4 datetime-4.2 eikon-0.1.11 idna-2.6 numpy-1.14.0 pandas-0.22.0 ython-dateutil-2.6.1 pytz-2017.3 requests-2.18.4 six-1.11.0 urllib3-1.22 websocket-client-0.46.0 zope.interface-4.4.3 C:\Python36-32\Scripts> import eikon as ek ek.set_app_key('8e5a3xxxxxxxxxxxxxxxxxxxxxxxxxxxx21b031c') Note: In earlier versions of the Python library for Eikon Data APIs, the set_app_key function was called set_app_id. The set_app_id function is now deprecated but still works. If you use an earlier version of the Python library you can call set_app_id to set the App Key of your application. Great, you have imported the library and set the App Key of your application. You are now ready to send data retrieval requests. The following instruction retrieves news headlines on Deutsche Lufthansa AG (equity RIC: LHAG.DE), between 09:00 and 18:00 GMT on the 5th of Apr 2017. 
ek.get_news_headlines('R:LHAG.DE', date_from='2019-03-06T09:00:00', date_to='2019-03-06T18:00:00') out: Now, let's display the latest news story satisfying the news search expression "EU AND POL", which represents news on the European Union politics. First, retrieve the news headlines using the search expression, then get the story ID from the response and finally request the story. headlines = ek.get_news_headlines('EU AND POL',1) story_id = headlines.iat[0,2] ek.get_news_story(story_id) The output is the HTML of the news story. The following commands return time series of daily price history for Microsoft Corp ordinary share between 1st of Jan and 10th of Jan 2016. df = ek.get_timeseries(["MSFT.O"], start_date="2016-01-01", end_date="2016-01-10") df out: The following commands retrieve fundamental data - Revenue and Gross Profit - for Google, Microsoft abd Facebook df, err = ek.get_data(['GOOG.O','MSFT.O', 'FB.O'], [ 'TR.Revenue','TR.GrossProfit']) df out: You can specify additional parameters and request full year revenue and gross profit for the last two years scaled to millions and converted to Euros. E.g. df, err = ek.get_data(['GOOG.O', 'MSFT.O', 'FB.O', 'AMZN.O', 'TWTR.K'], ['TR.Revenue.date','TR.Revenue','TR.GrossProfit'], {'Scale': 6, 'SDate': 0, 'EDate': -2, 'FRQ': 'FY', 'Curn': 'EUR'}) df out: You can also mix fundamental data with a snapshot of market data. df, err = ek.get_data(['VOD.L', 'FB.O'], [ 'TR.Revenue', 'TR.GrossProfit', 'CF_LAST']) df
https://developers.uat.refinitiv.com/en/api-catalog/eikon/eikon-data-api/quick-start
CC-MAIN-2022-27
refinedweb
911
58.21
Netwire. Contents. You need to import the Control.Wire module to work with wires: import Control.Wire The inhibition monoid The e argument to Wire is called the inhibition monoid. For simple applications you can just use () here, but you may want to actually assign exception values to inhibition. We will cover that later. For now just use (). Base arrows The (>~) argument to Wire is called the base arrow. In most cases you will use a Kleisli arrow here, and this is currently the only type of arrow supported, though more will be added in the future. For simple applications you can just use the IO monad, and it is useful to define a type alias for your custom wire type: type MyWire = Wire () (Kleisli IO) Running wires For running a wire you can use the stepping functions available in the Control.Wire.Session module. There is no need to import that module. It is automatically imported with Control.Wire. For Kleisli-based wires you will want to use the stepWireM function: stepWireM :: Monad m => Wire e (Kleisli m) a b -> a -> m (Either e b, Wire e (Kleisli m) a b) In our case we have m = IO, so our type signature is simply: stepWireM :: MyWire a b -> a -> IO (Either () b, MyWire a b) This function takes a wire and an input value. It passes the input value to the wire and returns its result value of type Either () b. Along with the result it also returns a new wire. Normally you would call stepWireM in a loop, which performs instant after instant. This is the basic structure: Testing wires There is a convenient function for testing wires, which does all the plumbing for you. It's called testWireM: testWireM :: (Show e, MonadIO m) => Int -> m a -> Wire e (Kleisli m) a String -> m () It prints the output continuously on a single line: main :: IO () main = testWireM 1000 (return 15) system
In other words, stepWireM will return Right 15 along with a new wire that outputs 15 again: stepWireM (constant 15) inp -> (Right 15, constant 15) Note the fully polymorphic input type a. This basically means that the wire disregards its input, so whatever inp is, it is ignored. The identity wire is slightly more interesting. It has input and output of type b. What it does is: It simply outputs its input value at every instant:: stepWireM (countFrom 15) inp -> (Right 15, countFrom 16) stepWireM (countFrom 16) inp -> (Right 16, countFrom 17)
https://wiki.haskell.org/index.php?title=Netwire&direction=next&oldid=43313
CC-MAIN-2022-27
refinedweb
441
70.02
My idea is to construct a "tied Singleton" by both inheriting from tie::refhash::nestable and Class::StrongSingleton. What I did yet in my module file: package GlobalHash; use vars qw(@EXPORT @EXPORT_OK $VERSION $REVISION $AUTOLOAD @ISA); use Exporter; ... use Tie::RefHash; use Class::StrongSingleton; use base qw(Class::StrongSingleton Tie::RefHash::Nestable ); sub TIEHASH { my $proto = shift; my $class = ref($proto) || $proto; my $self = {@_}; $self = Tie::RefHash::Nestable->TIEHASH($self, $class); # initialize it as a singleton $self->_init_StrongSingleton(); $self->{'START'} = scalar localtime; return $self; } [download] Any help welcome Hoppfrosch my $class = ref($proto) || $proto; [download] my $class = ref($proto) || $proto; [download] Now, it looks like you're enamored with shiny objects. Let's get back to the basics. You want a hash that is in every namespace? Then simply create a module that populates the hash and exports it: package MyGlobalHash; use base Exporter; @EXPORT = qw(%MyGlobalHash); %MyGlobalHash = (foo => "Hello, world!\n"); 1; [download] package RandomPackage; use MyGlobalHash; ... print $MyGlobalHash{"foo"}; [download] Also keep in mind that I hate global data. You should export behavior, not data. You should avoid global data as much as possible, because it creates nasty couplings that keep you from refactoring or reusing or testing code well. -- Randal L. Schwartz, Perl hacker Be sure to read my standard disclaimer if this is a reply. I agree with your aversion to global variables, but since I'm currently reengineering stepwise a large piece of code (which makes intensive use of global variables) I have to use global variables (in my first reengineering iteration) as well to keep the code working. On the other hand: What speaks against a global variable keeping all information needed anywhere in the code? 
An alternative might be passing the data by parameters - which causes a lot of typo-overhead, since the data is needed anywhere in the code. You were partially right with your conclusion "you're enamored with shiny objects" - first I had a singleton as global data structure. Then I wanted to have an easy access to the data members like hash access. This results in my idea of the marriage ... But your suggestion only meets half the truth: I'm not able to populate the hash within an unique action - the contents of the hash has to grow/change during runtime ... Therefore I need a dynamic - and not a static - global data structure. Beside all aspects that speak against my suggested solution: As I'm keen to improve my perl knowledge, I would like to have a solution to my problem with "multiple inheritance" and correct initialization ... merlyn isn't saying you shouldn't have a global repository for information. What he is saying is that you should be exporting functions/methods that the code can call to access said information. Then, if you ever need to log something or modify something as it's being set or retrieved, you have one place to do that. Or, let's say you're moving from a file-based system to a DB-based system. Bite the bullet and change everything to a pure singleton and call the methods to get at or set the data. This is even more important with setting data. (Though, this is starting to sound more like a stash vs. a configuration object ...) ... multiple inheritance ... Therefore I need a dynamic - and not a static - global data structure. Class::Singleton and its relatives really don't do that much for you. Look under the hood and you won't find that much code. The concept of a singleton is very simple and you can just implement the 5 lines of code in your module. You can see my article on Singletons in The Perl Review 0.1. 
The Cult of Design Patterns seems to think that you should make the end programmer realize he is using a singleton, and instead of new() use something like get_instance(). I think that's the wrong way to go: in most cases the end programmer shouldn't have to think about that as long as everything works. That doesn't matter much in your case since the magic is hidden in the Tie interface. Before you start doing things in TIEHASH, however, you need to check if you already have the instance, and if you do, simply return it. Don't make a new object then check for an old
http://www.perlmonks.org/index.pl?node_id=461989
CC-MAIN-2013-48
refinedweb
732
61.56
What you'll build In this codelab, we'll learn to use TensorFlow Lite For Microcontrollers to run a deep learning model on the SparkFun Edge Development Board. We'll be working with the board's built-in speech detection model, which uses a convolutional neural network to detect the words "yes" and "no" being spoken via the board's two microphones. Machine Learning on Microcontrollers Machine learning can be used to create intelligent tools that make users' lives easier, like Google Assistant. But often, these experiences require a lot of computation or resources that can include a powerful cloud server or a desktop. However, it's now possible to run machine learning inference on tiny, low-powered hardware, like microcontrollers. Microcontrollers are extremely common, cheap, require very little energy, and are very reliable. They are part of all sorts of household devices: think appliances, cars, and toys. In fact, there are around 30 billion microcontroller-powered devices produced each year. By bringing machine learning. TensorFlow Lite For Microcontrollers (Software) TensorFlow is Google's open source machine learning framework for training and running models. TensorFlow Lite is a software framework, an optimized version of TensorFlow, targeted to run tensorflow models on small, relatively low-powered devices such as mobile phones. TensorFlow Lite For Microcontrollers is a software framework, an optimized version of TensorFlow, targeted to run tensorflow models on tiny, low-powered hardware such as microcontrollers. It adheres to constraints required in these embedded environments, i.e, it has a small binary size, it doesn't require operating system support, any standard C or C++ libraries, or dynamic memory allocation, etc. SparkFun Edge (Hardware) The SparkFun Edge is a microcontroller-based platform: a tiny computer on a single circuit board. It has a processor, memory, and I/O hardware that allows it to send and receive digital signals to other devices. 
It has four software-controllable LEDs, in your favorite Google colors. Unlike a computer, a microcontroller doesn't run an operating system. Instead, the programs you write run directly on the hardware. You write your code on a computer and download it to the microcontroller via a device called a programmer. Microcontrollers are not powerful computers. They have small processors, and not much memory. But because they are designed to be as simple as possible, a microcontroller can use very little energy. Depending on what your program does, the SparkFun Edge can run for weeks on a single coin cell battery! What you'll learn - Compile the sample program for the SparkFun Edge on your computer - Deploy the program to your device - Make changes to the program and deploy it again What you'll need You will need the following hardware: - Linux or MacOS computer - SparkFun Edge board - SparkFun USB-C Serial Basic programmer - USB-C to USB-A cable (If you're on a USB-C computer, instead get USB-C to USB-C cable) - (optional) 3V 20mm coin cell lithium battery (CR2032) to run inference without a programmer and cable You will need the following software: - Git (check if it's installed by running giton the command line) - Python 3 (check if it's installed by running python3or python --versionon the command line) - Pip for Python 3 ( helpful StackOverflow answer) - Make 4.2.1 or higher (check if it's installed by running make --versionon the command line) - SparkFun Serial Basic drivers The SparkFun Edge microcontroller comes with a pre-installed binary that can run the speech model. Before we overwrite this with our own version, let's first run this model. Power your board by: - Inserting a coin cell battery into the battery connector on the back of the board (with the "+" side of the battery facing up. 
If your board came with a battery already inserted, pull out the plastic tab, and push the battery to ensure it's fully inserted) - If you don't have a coin battery, you can use the SparkFun USB-C Serial Basic programmer device to power the board. - Connect a USB-C cable between the SparkFun USB-C Serial Basic and your computer. Once you've powered your board by inserting the battery or connecting the USB programmer, the board will wake up and begin listening with its microphones. The blue light should begin to flash.
Build the binary To download all required dependencies and create the binary, run the following command: make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge micro_speech_bin If the build works successfully, the final line of the output should appear as follows: arm-none-eabi-objcopy tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/micro_speech tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/micro_speech.bin -O binary To confirm that the binary was successfully created, run the following command: test -f \ tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/micro_speech.bin && \ echo "Binary was successfully created" || echo "Binary is missing" You should see Binary was successfully created printed to the console! If you see Binary is missing, there was a problem with the build process that will require debugging. Prepare the binary The binary must be signed with cryptographic keys to be deployed to the device. We'll now run some commands that will sign our binary so it can be downloaded to the SparkFun Edge. Enter the following command to set up some dummy cryptographic keys we can use for development: cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info0.py tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info.py Now, run the following command to create a signed binary: python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_image_blob.py \ --bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/micro_speech.bin \ --load-address 0xC000 \ --magic-num 0xCB \ -o main_nonsecure_ota \ --version 0x0 This will create the file main_nonsecure_ota.bin. 
We'll now run another command to create a final version of the file that can be used to flash our device with the bootloader script we will use in the next step: python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \ --load-address 0x20000 \ --bin main_nonsecure_ota.bin \ -i 6 \ -o main_nonsecure_wire \ --options 0x1 You should now have a file called main_nonsecure_wire.bin in the directory where you ran the commands. This is the file we'll be flashing to the device. What is flashing? The SparkFun Edge stores the program it is currently running in its 512 kilobytes of flash memory. If we want the board to run a new program, we have to send it to the board, which will store it in flash memory, overwriting any program that was previously saved. This process is called "flashing", and we'll use it to send our program to the board. Attach the programmer to the board To download new programs to the board, we'll be using the SparkFun USB-C Serial Basic serial programmer. This device allows your computer to communicate with the microcontroller via USB.. Attach the programmer to your computer We'll be connecting the board to your computer via USB. To program the board, we'll need to know the name that your computer gives the device. The best way of doing this is to list all the computer's devices before and after attaching it, and look to see which device is new. Before attaching the device via USB, run the following command: If you are using Linux: ls /dev/tty* If you are using MacOS: ls /dev/cu* This should output a list of attached devices that looks something like the following: /dev/cu.Bluetooth-Incoming-Port /dev/cu.MALS /dev/cu.SOC Now, connect the programmer to your computer's USB port. Enter the following command again: If you are using Linux: ls /dev/tty* If you are using MacOS: ls /dev/cu* You should see an extra item in the output, as in the example below. 
Your new item may have a different name. This new item is the name of the device. /dev/cu.Bluetooth-Incoming-Port /dev/cu.MALS /dev/cu.SOC /dev/cu.wchusbserial-1450 First, we'll create an environment variable to identified the device name: export DEVICENAME=put your device name here Next, we'll create an environment variable to specify the baud rate, which is the speed at which data will be sent to the device: export BAUD_RATE=921600 Run the script to flash your board To flash the board, we have to put it into a special "bootloader" state that prepares it to receive the new binary. We'll then run a script to send the binary to the board. Let's get familiar with the following buttons on the board: Perform the following steps to reset and flash the board: - Ensure your board is connected to the programmer, and the entire setup is connected to your computer via USB. - Start holding the button marked 14on the board. Keep holding it until Step 6. - Still holding the button marked 14, in order to reset the board into its bootloader state, click the button marked RSTto reset the board. - Still holding the button marked 14, paste the following command into your terminal and hit enter to run it (For convenience, you can paste this command into your terminal before you start holding the button, but don't press enter until you reach this step) python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/uart_wired_update.py -b ${BAUD_RATE} ${DEVICENAME} -r 1 -f main_nonsecure_wire.bin -i 6 - Still holding the button marked 14, you should now see something like the following appearing on-screen: Connecting with Corvette over serial port /dev/cu.usbserial-1440... Sending Hello. Received response for Hello Received Status length = 0x58 version = 0x3 Max Storage = 0x4ffa0 Status = 0x2 State = 0x7 AMInfo = 0x1 0xff2da3ff 0x55fff 0x1 0x49f40003 0xffffffff [...lots more 0xffffffff...] Sending OTA Descriptor = 0xfe000 Sending Update Command. 
number of updates needed = 1 Sending block of size 0x158b0 from 0x0 to 0x158b0 Sending Data Packet of length 8180 Sending Data Packet of length 8180 [...lots more Sending Data Packet of length 8180...] - Stop holding the button marked 14on the board after seeing Sending Data Packet of length 8180(but it's okay if you keep holding it). The program will continue to print lines on the terminal. It will eventually look something like the following: [...lots more Sending Data Packet of length 8180...] Sending Data Packet of length 8180 Sending Data Packet of length 6440 Sending Reset Command. Done. If you see Done then this indicates a successful flashing. If the program output ends with an error, check if Sending Reset Command was printed. If so, flashing was likely successful despite the error. On a Linux machine, you may encounter a NoResponse Error. This is because the ch34x serial driver has been installed alongside the existing serial driver, which can be resolved as follows: Step 1: Re-install the correct version of the ch34x library. Ensure that the device is unplugged from the computer during the installation. git clone cd CH341SER/ make sudo insmod ch34x.ko sudo rmmod ch341 Step 2: Plug the board USB in and run: dmesg | grep "ch34x" You should see a message like this: [ 1299.444724] ch34x_attach+0x1af/0x280 [ch34x] [ 1299.445386] usb 2-13.1: ch34x converter now attached to ttyUSB0 If the driver used is not "ch34x" (eg: ch341), try disabling the other driver by running: rmmod <non-ch34x driver name> Unplug and replug the device and ensure that the driver being used is "ch34x". Try the program out Once your board has been successfully flashed, hit the button marked RST to restart the board and start the program. If the blue LED starts blinking, flashing is successful. If not, scroll down to the "What if it didn't work?" section below. ") What if it didn't work? 
Here are some possible issues and how to debug them: Problem: After flashing, none of the LEDs are coming on. Solution: Try hitting the RST button, or disconnecting and reconnecting the board from the programmer. If none of these work, try flashing the board again. Problem: The blue LED is lighting up, but it's very dim. Solution: Replace the battery as it's running low. Alternatively, the board can be powered by computer using the programmer and the cable. Review this section if you face issues and need to debug your code in detail. In order to understand what is going on in a microcontroller when your code runs, you can print debugging information through the board's serial connection. You use your computer to connect to the board and display the data that the board is sending. Open a serial connection By default, our SparkFun Edge sample code logs any spoken commands, along with their confidence. To see the board's output you can run the following command: screen ${DEVICENAME} 115200 You may initially see an output that looks something like the following: (This only appears if the board is reset once connected else you may start seeing debug information) Apollo3 Burst Mode is Available Apollo3 operating in Burst Mode (96MHz) Try issuing some commands by saying "yes" or "no". You should see the board printing debug information for each command: Heard yes (202) @65536ms In the above log, yes refers to the command. The number 202 refers to the level of confidence that the command was heard (with 200 being the minimum). Finally, 65536ms refers to the amount of time that has elapsed since the microcontroller was last reset. To stop viewing the debug output, hit Ctrl+A, immediately followed by the K key, then hit the Y key. 
Write debug logs You can see the code that logs this information in the command_responder.cc file you were just working with: tensorflow/lite/micro/examples/micro_speech/sparkfun_edge/command_responder.cc To log data, you can call the error_reporter->Report() method. It supports the standard printf tokens for string interpolation, which you can use to include important information in your logs: error_reporter->Report("Heard %s (%d) @%dms", found_command, score, current_time); This method should come in handy when you are making your own changes to the code in the next section. Now that you know how to build and flash your SparkFun Edge, you can start playing with the code and deploying it to your device to see the results. Read the code A good place to start reading the code is the following file, command_responder.cc. tensorflow/lite/micro/examples/micro_speech/sparkfun_edge/command_responder.cc You can see the file on GitHub here. The method in this file, RespondToCommand, is called when a voice command is detected. The existing code turns on a different LED depending on whether "yes", "no", or an unknown command was heard. The following snippet shows how this works: if (found_command[0] == 'y') { am_hal_gpio_output_set(AM_BSP_GPIO_LED_YELLOW); } if (found_command[0] == 'n') { am_hal_gpio_output_set(AM_BSP_GPIO_LED_RED); } if (found_command[0] == 'u') { am_hal_gpio_output_set(AM_BSP_GPIO_LED_GREEN); } The found_command argument contains the name of the command that was detected. By checking the first character, this set of if statements determines which LED to light. The method RespondToCommand is called with several arguments: void RespondToCommand(tflite::ErrorReporter* error_reporter, int32_t current_time, const char* found_command, uint8_t score, bool is_new_command) { error_reporteris used to log debug information (more on that later). current_timerepresents the time that the command was detected. found_commandtells us which command was detected. 
scoretells us how confident we are that we detected a command. is_new_commandlets us know if this is the first time hearing the command. The score is an integer number from 0-255 that represents the probability that a command was detected. The sample code only considers a command as valid if the score is greater than 200. Based on our testing, most valid commands fall within the range of 200-210. Modify the code The SparkFun Edge board has four LEDs. Currently, we're flashing the blue LED to indicate that recognition is occurring. You can see this in the command_responder.cc file: static int count = 0; // Toggle the blue LED every time an inference is performed. ++count; if (count & 1) { am_hal_gpio_output_set(AM_BSP_GPIO_LED_BLUE); } else { am_hal_gpio_output_clear(AM_BSP_GPIO_LED_BLUE); } Since we have a bank of four LEDs, let's modify the program to use them as a visual indicator of the score of a given command. A low score will merit a single lit LED, and a high score will result in multiple lights. To ensure we have a way to know that the program is running, we'll make the red LED flash continually instead of the blue. The adjacent blue, green, and yellow LEDs will be used to show the strength of our most recent score. And for simplicity, we'll only light up those LEDs if the word "yes" is spoken. If another word is detected, the LEDs will clear. To make this change, replace all the code in your command_responder.cc file with the following snippet: #include "tensorflow/lite/micro/examples/micro_speech/command_responder.h" #include "am_bsp.h" // This implementation will light up the LEDs on the board in response to different commands. 
void RespondToCommand(tflite::ErrorReporter* error_reporter, int32_t current_time, const char* found_command, uint8_t score, bool is_new_command) { static bool is_initialized = false; if (!is_initialized) { // Setup LEDs as outputs am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_RED, g_AM_HAL_GPIO_OUTPUT_12); am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_BLUE, g_AM_HAL_GPIO_OUTPUT_12); am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_GREEN, g_AM_HAL_GPIO_OUTPUT_12); am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_YELLOW, g_AM_HAL_GPIO_OUTPUT_12); // Ensure all pins are cleared am_hal_gpio_output_clear(AM_BSP_GPIO_LED_RED); am_hal_gpio_output_clear(AM_BSP_GPIO_LED_BLUE); am_hal_gpio_output_clear(AM_BSP_GPIO_LED_GREEN); am_hal_gpio_output_clear(AM_BSP_GPIO_LED_YELLOW); is_initialized = true; } static int count = 0; // Toggle the red LED every time an inference is performed. ++count; if (count & 1) { am_hal_gpio_output_set(AM_BSP_GPIO_LED_RED); } else { am_hal_gpio_output_clear(AM_BSP_GPIO_LED_RED); } if (is_new_command) { // Clear the last three LEDs am_hal_gpio_output_clear(AM_BSP_GPIO_LED_BLUE); am_hal_gpio_output_clear(AM_BSP_GPIO_LED_GREEN); am_hal_gpio_output_clear(AM_BSP_GPIO_LED_YELLOW); error_reporter->Report("Heard %s (%d) @%dms", found_command, score, current_time); // Only indicate a 'yes' if (found_command[0] == 'y') { // Always light the blue LED am_hal_gpio_output_set(AM_BSP_GPIO_LED_BLUE); // Light the other LEDs depending on score if (score >= 205) { am_hal_gpio_output_set(AM_BSP_GPIO_LED_GREEN); } if(score >= 210) { am_hal_gpio_output_set(AM_BSP_GPIO_LED_YELLOW); } } } } If a new command is detected, is_new_command will be true. We'll clear the blue, green, and yellow LEDs, then light them up again depending on the values of found_command and score. Rebuild and flash Once you've made code changes, test it by running all steps from Build and prepare the binary. Congratulations, you've successfully built your first speech detector on a microcontroller! 
We hope you've enjoyed this brief introduction to development with TensorFlow Lite for Microcontrollers. The idea of deep learning on microcontrollers is new and exciting, and we encourage you to go out and experiment! Reference docs - Train your own model to understand different commands, now that you have experience working with the basic program. Note: Training will take a couple of hours! - Learn more about TensorFlow Lite for Microcontrollers ( Website, GitHub). - Try other examples and try running them on the SparkFun Edge, if it's supported. - Refer to the O'Reilly book TinyML: Machine Learning with TensorFlow on Arduino and Ultra-Low Power Micro-Controllers, which introduces machine learning on tiny devices and walks through several fun projects. This codelab is based on Chapter 7 and 8 of the book. Thanks, and have fun building!
https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow/?source=post_page---------------------------
CC-MAIN-2020-45
refinedweb
3,395
52.49
: It is often vital for applications to be able to secure the data passed to and from a SQL Server database server. With SQL Server, you can use SSL to create an encrypted channel. This How To shows you how to install a certificate on the database server, configure SQL Server for SSL and to verify that the channel is secure. (9 printed pages) Notes Summary of Steps Step 1. Request and Install a Server Authentication CertificateStep 2. Verify that the Certificate Has Been Installed Step 3. Install the Issuing CA's Certificate on the Client Step 4. Force All Clients to Use SSL Step 5. Allow Clients to Determine Whether to Use SSL Step 6. Verify that Communication Is Encrypted Additional Resources You can use the Secure Sockets Layer (SSL) protocol to secure the communication link between clients (direct callers) and Microsoft® SQL Server. When you configure SQL Server for SSL, all of the data transmitted between client and server (and vice versa) may be encrypted to ensure that the data remains confidential while in transit between the client and SQL Server. You can configure the server to force all clients to use SSL (as described in this procedure), or you can let clients choose whether or not to use SSL on a per-connection basis (as described in the next procedure). The advantages of configuring the server to force clients to use SSL are: The disadvantages are: To force all clients to use SSL SSL is not supported with other protocols. All subsequent client connections will be required to use SSL, whether they specify secure connections or not. This procedure shows you how to configure SSL to allow clients to choose whether or not to use SSL. You can either configure the client libraries to enforce the use of SSL on all connections, or you can let individual applications choose on a per-connection basis. 
The advantages of configuring the client" In this procedure you will use Network Monitor to verify that data sent between the application server and database server is encrypted. You will start by sending data in clear text form and then enable encryption first by configuring the server and then by configuring the client. To verify that communication is encrypted Note Replace server name in the connection string with the name of your database server. using System; using System.Data; using System.Data.SqlClient; namespace SQLSecureClient { class Class1 { [STAThread] static void Main(string[] args) { // Replace the server name in the following connection string // with the name of your database server SqlConnection conn = new SqlConnection( "server='sql01';database=NorthWind;Integrated Security='SSPI'"); SqlCommand cmd = new SqlCommand("SELECT * FROM Products"); try { conn.Open(); cmd.Connection = conn; SqlDataReader reader = cmd.ExecuteReader(); while (reader.Read()) { Console.WriteLine("{0} {1}", reader.GetInt32(0).ToString(), reader.GetString(1) ); } reader.Close(); } catch( Exception ex) { } finally { conn.Close(); } } } } You must also use SQL Server Enterprise Manager to create a database logon for the newly created account and add a new database user for this logon to the Northwind database. Note A limited version of Network Monitor is available with Windows 2000 Server and Windows Server 2003. A full version is available with Microsoft SMS. If you do not have Network Monitor installed, go to Add/Remove Programs in Control Panel, click Add/Remove Windows Components, select Management and Monitoring Tools from the Windows Components list, click Details and select Network Monitor Tools. Click OK, and then click Next, to install the limited version of Network Monitor. You may be prompted for a Windows 2000 Server or Windows Server 2003 installation CD. –. ]]>
https://msdn.microsoft.com/en-us/aa302414.aspx
CC-MAIN-2016-07
refinedweb
599
53.41
An applet is a class file that is specially written to display graphics in a web browser. Applets are embedded in web pages using the HTML tag <Applet>.. This applet uses nothing from the AWT besides the Graphics class. A simple program using applet looks like: This program Java Applet Hello World Examples introduces the Applet in Java. You will learn in this example how to develop and run applet code in browser.This program Java Applet Hello World Examples creates a simple applet that displays Hello World message. import java.applet.Applet; import java.awt.Graphics; public class HelloAppletWorld extends Applet { public void paint(Graphics g) { g.drawString("Hello World!", 200, 100); } } Compiling Applets: javac HelloAppletWorld.java Running this HelloAppletWorld program from Web browser: Running applet program with web browser is easy job, create an HelloAppletWorld.html file with the below code: <html> <head> <title>Java Applet Hello World Examples</title> </head> <body> <applet code="HelloAppletWorld.class" width="500" height="500"></applet> </body> </html> The Applet tag in HelloAppletWorld.html is used to embed an applet in the web page. <applet code="HelloAppletWorld.class" width="500" height="500"></applet>
https://ecomputernotes.com/java/awt-and-applets/javaapplethelloworld
CC-MAIN-2022-05
refinedweb
190
50.94
I updated the RUN lines of the .ll test to prevent the failure at. FYI: I am trying to figure out this failure that does not appear on my machine: This last change is trying to remove the failure at I haven’t been able to reproduce such failure on my dev machine, so I just changed the RUN lines in the test (added -c, used 2>%t) to see if the bot is going to be happy with the new invocation. I have removed the C test, as the LL test is enough to test the changes I have done in Local.cpp. I can confirm that with this patch, the release note ton'r raise any warnings when compiling with debug info: frapet01@man-08:~/projects/upstream-clang/build-clang$ cat ../release-notes.c #include <arm_sve.h> void VLA_add_arrays(double *x, double *y, double *out, unsigned N) { for (unsigned i = 0; i < N; i += svcntd()) { svbool_t Pg = svwhilelt_b64(i, N); svfloat64_t vx = svld1(Pg, &x[i]); svfloat64_t vy = svld1(Pg, &y[i]); svfloat64_t vout = svadd_x(Pg, vx, vy); svst1(Pg, &out[i], vout); } } frapet01@man-08:~/projects/upstream-clang/build-clang$ ./bin/clang --target=aarch64-gnu-linux -march=armv8-a+sve -c -g -S -O3 -o /dev/null ../release-notes.c frapet01@man-08:~/projects/upstream-clang/build-clang$ When the patch will end up into a nightly build of compiler explorer, the compilation at this link should not raise any warnings: This patch needs to be retitled to what this is actually doing: changing the getTypeAllocationSizeInBits and getFragmentSizeInBits to return a TypeSize instead of unsigned. It would be even better if you can split those up into two patches with separate tests for each one of them. I'm not entirely sure if it is acceptable to have a clang test that generates assembly, but in any case I think this test needs to be added in a separate patch. Change back to auto ? Should the scalable flag match? Same question for all the other cases in this patch. Thank you for the review @sdesmalen Thank you! 
I'd like to keep this, because it seems to me the use of auto in this case didn't fit in the cases mentioned in. There is no direct type deduction from the context around the initialization of the variable Fragment. I have added assertions around before TypeSize comparisons where it made sense to do so, but not in the lambda used in the sort invocation. We don't need to check whether the scalable flag matches here because we are sorting a container that (potentially) have both types of vectors. Rebase on top of D92020. NFC. Add comment to DbgVariableIntrinsic::getFragmentSizeInBits(). NFC Hi @sdesmalen I have extracted D92020 to implement only the change of interface for AllocaInst::getAllocationSizeInBits. I wanted to extract also the interface change in DbgVariableIntrinsic::getFragmentSizeInBits() but such change would require also to change valueCoversEntireFragment: essentially, this is already what I am doing in this patch.,. I have added a comment in DbgVariableIntrinsic::getFragmentSizeInBits() explaining that to get full support for scalable types in DbgVariableIntrinsic::getFragmentSizeInBits() we should update the DIVariable::getSizeInBits to carry the scalable flag for scalable variables. I am happy to continue in that direction (it seems an extensive change with quite a few implications), I just wanted to double check whether 1. you agree on the need to change DIVariable::getSizeInBits to privide TypeSize info, and 2. if you think if you are happy for the warning fix (this patch) to go in before the DIVariable::getSizeInBits interface change, or 3. do you see a third way? :) Cheers, Francesco Reverted getFragmentSizeInBits to return an integral type and not TypeSize, because scalable variables cannot be part of structs/arrays - hence they cannot be pointed by fragments. I am finishing up this patch. 😄 Reduce debug info in debug-declare-no-warnings-on-scalable-vectors.ll. 
Please rename debug-declare-no-warnings-on-scalable-vectors.ll to something different, because these 'warnings' are only temporary and will be replaced by errors in the future. Having 'no warnings' in the name of the test name seems wrong from that perspective. In D91806#2413692, @fpetrogalli wrote:. Given that the generated warnings are temporary and only a side-effect of the implicit conversion 'hack', and thus not a feature or design choice, I still think the patch needs to be retitled to better phrase what it does, i.e. change getTypeAllocationSizeInBits to return TypeSize instead of unsigned. Address @sdesmalen's comments. I've taken over the differential. Simplify things a bit more, remove an untested remnant created from before the patch split. Attempting to update commit message with arc diff --verbatim. After your changes, what line causes this warning to be emitted? Does this test also pass with asserts enabled? It's a -NOT check, so it's not being emitted. Yes, the test passes even with asserts enabled. I assume the question is motivated by putting the check next to the line which causes the warning(?). Update for review comment. Discussed out of band. Sander's interest was to disallow any warning output, as with other SVE ASM-NOT tests in the clang directory. this lgtm with all other reviewer's concerns addressed. LGTM
https://reviews.llvm.org/D91806/new/
CC-MAIN-2022-27
refinedweb
878
63.8
Why C# is better than C++06 May 2010 This post was imported from blogspot.I could phrase this as "why I hate C++" or "why C++ sucks", but let's try the more positive spin, "why C# is better". The reasons are so numerous and compelling that there is only one reason to use C++ instead, and that is better performance. - C# compiles much faster - IntelliSense is much more reliable and faster (press F12 in Visual Studio to see the definition of any symbol) - Automatic memory management cuts your development time in half all by itself - not just because you write less code, but also because you'll never have to track down "double free" problems and you'll very rarely have memory leaks (and if do you have memory leaks, CLR profiler can help track them down) - No weird errors caused by #include order or #defines - No more buffer overflows or other C-related security vulnerabilities - Debugging is much easier; you can execute arbitrary expressions and call your own functions and properties from within Visual Studio's debugger (and SharpDevelop, I expect) - Strings are handled the same way in all code (no more converting between various string representations) - C# has anonymous inner functions with type inference (but the newest version of C++ has so-called "lambdas" too) - GUIs are easier to make in C# (at least if you use WinForms. I found WPF very hard to learn) - LINQ (Language INtegrated Query) - "yield return" statement for writing generators and coroutines (approximate C equivalent) - You can create and compile code at run-time using Reflection.Emit, Dynamic Methods or (easiest) LambdaExpression.Compile. These use the JIT engine to produce new machine code at run-time. - The .NET standard libraries ("BCL") have more capabilities than those of C++, and the STL is more cumbersome (object->my_vector.erase(object->my_vector.begin() + index), anyone?) 
- Unlike C++ templates, C# generics are guaranteed to work for all type parameters, do not bloat your code size, and can be used by modules linked dynamically (mind you, C++ templates can do some things that are hard/impossible for generics, but advanced use of templates is difficult) - The MS C# compiler gives much better error messages than the MS C++ compiler - You can mix different programming languages much more easily in .NET. Bjarne Stroustrup said, "I consider the idea of one language, one programming tool, as the one and only best tool for everyone and for every problem infantile"--yet C++ isn't designed to inter-operate with any language other than C. You can use SWIG if necessary, but it's got a big learning curve and C++ can't take credit for it anyway. On Windows, COM is a possible solution, but it's a huge pain to write COM classes in C++. - There are various tools for analyzing and modifying .NET assemblies/programs after they are compiled, e.g. PostSharp provides aspect-oriented programming and Microsoft Code Contracts let you specify preconditions, postconditions and invariants in your classes. - You can easily see how the standard libraries work in their binary form, using Reflector (the source code of the BCL is also available). - You can write "safe" code that can run directly in a web browser (Silverlight) - It is possible to write a C# program that targets a mobile device (ARM) and run the same binary on your desktop PC - You'll no longer have to manage *.h files and write every function declaration twice. Well, you can save yourself work by leaving short"inline" functions in the header file, but you'll pay for it later with slower compile times. The need to use a different syntax for member functions in the header file than the implementation is a huge pet peeve of mine. 
Consider the difference between the header file declaration "virtual std::string Name(bool longForm = false);" and the cpp file equivalent "string ClassName::Name(bool longForm) { ... }". In C++ I'm expected to manually remove "virtual", "= false" and the semicolon, but add "ClassName::". Plus you might want to remove "std::" if you're "using namespace std" in the cpp file. Doing all this a few dozen times in a day can drive me mad, and of course the two copies make maintenance harder too. - Dynamic linking and reflection make it easy to support plug-in architectures, and to use 3rd party libraries without compiling them yourself. - No more dependency-detection glitches where you change a struct or virtual function table but not all dependencies are recompiled, leading to bizarre and unpredictable run-time behavior. In C# I never see such glitches, and if they did happen you would get a run-time exception rather than weird crashes or strange behavior. - C# IDEs support automatic refactoring and Visual C# underlines syntax and semantic errors as soon as you make them. No such luck in C++! Unfortunately, I still have to use C++! It is still the performance king and the standard on Windows platforms, and I maintain a performance-critical project for WinCE, on which .NET performs poorly. Some say C# is slower, but I find that if you write code carefully (more like you would in C), you can make C# almost as fast as C. The Microsoft .NET JIT compiler does not optimize nearly as well as a C compiler, but I find that C# code generally runs faster than a debug build of equivalent C/C++ code. Unfortunately, whereas C++ code may be 20 times slower when it runs on mobile (ARM-based) devices compared to a desktop PC, C# on the Compact Framework seems to be closer to 100 times slower than the same C# code running on a desktop PC. Microsoft needs to put a lot more work into their ARM version! Also note that Compact Framework has fewer features (e.g. 
no support for run-time code generation). Update: Long after writing this blog post I made a C++ vs C# benchmark. The conclusions were exactly as I expected/feared: C# is often almost as fast as C++ (but about half as fast in certain cases), while the Compact Framework is 3-11 times slower than C++. Now as much as I hate C++, and prefer C# over Java, the .NET Framework is far from perfect. Some parts of the BCL were badly designed, and by now it is getting to be extremely bloated. Also, I think the .NET framework needs some new features. Chief among my requests would be Go-style slices and interfaces, and return type covariance (of course). This would bring a lot of C's big-fiddling prowess to C# without compromising type safety. You know, that deserves its own blog entry.
http://loyc.net/2010/why-c-is-better-than-c.html
CC-MAIN-2019-26
refinedweb
1,118
57.81
glview_loop() Enter glview's execution loop. Synopsis: #include <glview/glview.h> GLVIEW_API int glview_loop(void) Since: BlackBerry 10.0.0 Arguments: Library:libglview (For the qcc command, use the -l glview option to link against this library) Description: This function contains glview's main execution loop. You must call glview_initialize() before calling this function. This function invokes the initialize callback before entering the execution loop. Once started, the execution loop runs for the life of the app, invoking at various points the registered callbacks. The only mandatory callback is the display callback, which is called once per loop. - Pull all events off the event queue and process them: - On background: fire the background callback (if registered) - On foreground: fire the foreground callback (if registered) - On orientation change: fire the resize callback (if registered) - Fire the event callback with event details (if registered) - On exit: break out of the execution loop. - Call the display callback if the app is in the foreground. - Swap the graphic's buffers. - Repeat from the top. When the user exits the app, the execution loop breaks, and the finalize callback is invoked. The glview_loop() function does not return until the user exits the application. When glview_loop() returns, the graphics stack has already been taken down, and glview has already been destroyed. Any calls to glview after glview_loop() has returned will either fail or have unspecified behavior. Returns: - EPERM: glview_initialize() was not called prior to glview_loop(). - EACCES: Failed to set BPS channel. Last modified: 2014-09-30 Got questions about leaving a comment? Get answers from our Disqus FAQ.comments powered by Disqus
http://developer.blackberry.com/native/reference/core/com.qnx.doc.glview.lib_ref/topic/glview_loop.html
CC-MAIN-2014-42
refinedweb
266
67.65
> But the only way you're going to know if the debuginfo items are there > is by reading through all of the debuginfo metadata. > > > > you're cutting off a lot of packages. > > Also - are you -1'ing the whole idea or just the idea as regards > debuginfo packages? We can save a lot of data by pruning out src.rpms. I think it is correct to move src.rpms and debuginfo out of the default path. Instead of moving this to the user to enable/disable it, I thought to use differently named files within the repodata. All information about src.rpms goes to new filenames and you can trigger reading in the debuginfo files if a "*-debuginfo" package name is needed. This puts the namespace question to the repodata and is adding special interpretation to rpm package names ("*-debuginfo"), but tries to get all this more transparent to the user compared to offering this via configured repo locations. ?? Both ways have their pros and cons. greetings, Florian La Roche
http://www.redhat.com/archives/fedora-devel-list/2005-January/msg01909.html
CC-MAIN-2014-41
refinedweb
170
66.13
Create a C# program that makes a copy of a text file but encrypted. Add the characters of each letter 13 times to encrypt the contents of the text file. using System.IO; public class FileEncrypter { public static void Main(string[] args) { string inputFileName = "input.txt"; string outputFileName = "output.txt"; string contents = File.ReadAllText(inputFileName); string cipher = string.Empty; byte key = 13; foreach (char letter in contents) { cipher += (char)(letter + key); } File.WriteAllText(outputFileName, cipher); } } Practice C# anywhere with the free app for Android devices. Learn C# at your own pace, the exercises are ordered by difficulty. Own and third party cookies to improve our services. If you go on surfing, we will consider you accepting its use.
https://www.exercisescsharp.com/text-files-b/encrypt-text-file
CC-MAIN-2020-40
refinedweb
118
61.53
Paperclip Paperclip is intended as an easy file attachment library for Active Record. GhostScript to be installed. On Mac OS X, you can also install that using Homebrew: brew install gs Installation Paperclip is distributed as a gem, which is how it should be used in your app. It's technically still installable as a plugin, but that's discouraged, as Rails plays well with gems. Include the gem in your Gemfile: gem "paperclip", "~> 2. Anyway, if you don't use Bundler (though you probably should, even in Rails 2), with config.gem # In config/environment.rb ... Rails::Initializer.run do |config| ... config.gem "paperclip", :version => "~> 2.4" ... end For Non-Rails usage: class ModuleName < ActiveRecord::Base include Paperclip::Glue ... end Quick Start In your model: class User < ActiveRecord::Base has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" } end In your migrations: class AddAvatarColumnsToUser < ActiveRecord::Migration def self.up change_table :users do |t| t.has_attached_file :avatar end end def self.down drop_attached_file :users, :avatar end end In your edit and new views: <%= form_for :user, @user, :url => user_path, :html => { :multipart => true } do |form| %> <%= form.file_field :avatar %> <% end %> In your controller: def create @user = User.create( params[:user] ) end In your show view: <%= image_tag @user.avatar.url %> <%= image_tag @user.avatar.url(:medium) %> <%= image_tag @user.avatar.url(:thumb) %> To detach a file, simply set the attribute to nil: @user.avatar = nil @user.save Usage The basics of paperclip are quite simple: Declare that your model has an attachment with the has_attached_file method, and give it a name. Paperclip will wrap up to has_attached_file is available in the documentation of Paperclip::ClassMethods. 
For validations, attachments can be validated with these Paperclip's validation methods: validates_attachment_presence validates_attachment_content_type validates_attachment_size Storage The files that are assigned as attachments are, by default, placed in the directory specified by the :path option to has_attached_file. By default, this location is :rails_root/public/system/:attachment/:id//avatars/13', '~> 1.3". compatability reasons, you can pass a single geometry string or an array containing a geometry and a format, which (i.e. jpgs will remain jpgs). For more information on the accepted style formats, see.. { |attachment| { :thumb => (attachment :watermark end Deploy Paperclip is aware of new attachment styles you have added in previous deploy. :deploy do desc "build missing paperclip styles" task :build_missing_paperclip_styles, :roles => :app do run "cd #{release_path}; RAILS_ENV=production bundle exec rake paperclip:refresh:missing_styles" end end after("deploy:update_code", "deploy:build_missing_paperclip_styles") Now you don't have to remember to refresh thumbnails in production everytime you add new style. Unfortunately it does not work with dynamic styles - it just ignores them. If you already have working app and don't want rake paperclip:refresh:missing_styles to refresh old pictures, you need to tell Paperclip about existing styles. Simply create. Contributing If you'd like to contribute a feature or bugfix: Thanks! To make sure your fix/feature has a high chance of being included, please read the following guidelines: - Ask on the mailing list[], or post a new GitHub Issue[]. - Make sure there are tests! We will not accept any patch that is not tested. It's a rare time when explicit tests aren't needed. If you have questions about writing tests for paperclip, please ask the mailing list. Please see CONTRIBUTING.md for details. Credits Paperclip is maintained and funded by thoughtbot, inc Thank you to all the contributors! 
The names and logos for thoughtbot are trademarks of thoughtbot, inc. License Paperclip is Copyright © 2008-2011 thoughtbot. It is free software, and may be redistributed under the terms specified in the MIT-LICENSE file.
http://www.rubydoc.info/gems/paperclip/2.7.0
CC-MAIN-2017-51
refinedweb
595
50.33
Hi, since you started to register each installed copy maybe it would be good also to have a retirement of installations option via the web. For instance, I almost always install my wing on client-site servers to be able to do post-install debuging of apps. Next week Im about to retire a w2k server at a client-site and replace it with a linux server. Same goes for all the vmware sessions I keep reinstalling - they eat up licenses real quikly... Instead of now having used up two licenses, wouldnt it make sence if I could release the no-longer needed license, otherwise I sooner or later once again will end up being irritated at you guys when I sit at a client site and find that I run out of installments and need to run in 10-minute mode. Both frustrating to wait for mail from you due to TZ differences, and also emarasing when the client happens to see me restarting wing and asks if they dont pay me well enough to be able to purchase my devel-environment :( Also it doesnt give you that warn fuzzy feeling if I have to "beg" for licenses every now and then, after all I did pay for the product. Another option would be to ditch the current restricted installment alotment, and instead do a four step procedure: 1. I install wing on a new system, enter my license number, this is mailed to your server, this system is granted a 1 day temporary license 2. You check the mailadr I registred with on purchase and send out a automatic notificaion 3. Wich I accept and return (agreeing to all kinds of mumbo-yumbo legaleese) 4. Now next time I start the newly installed system it checks your server and is granted a permanent license. This way you dont have to manually bother if I run wing on 2 or 12 different systems, and you still have some control, since I have accepted the new system with my emailadr. 
If I sudenly registred 10 systems you could always send a nice letter asking me whats up ;) Your current license system is clearly not suitable for people working with network-computing, since you seem to asume that your customers only run wing on one or two systems. One option would of course be to ditch wing and convert to eric to solve all this, but I really prefer wing, so I rather stick to your product. regards /Jacob Lundqvist
http://wingware.com/pipermail/wingide-users/2005-February/002149.html
CC-MAIN-2014-10
refinedweb
419
60.58
FMAX(3) BSD Programmer's Manual FMAX(3) fmax, fmaxf, fmaxl, fmin, fminf, fminl - floating-point maximum and minimum functions libm #include <math.h> double fmax(double x, double y); float fmaxf(float x, float y); long double fmaxl(long double x, long double y); double fmin(double x, double y); float fminf(float x, float y); long double fminl(long double x, long double y); The fmax(), fmaxf(), and fmaxl() functions return the larger of x and y, and likewise, the fmin(), fminf(), and fminl() functions return the smaller of x and y. They treat +0.0 as being larger than -0.0. If one.
http://mirbsd.mirsolutions.de/htman/i386/man3/fminl.htm
crawl-003
refinedweb
106
64.75
ComboBox Overview The Blazor ComboBox component allows the user to choose an option from a predefined set of choices presented in a dropdown popup. You can also allow them to enter custom values and to filter the available items. You can control the data, sizes, and various appearance options like class and templates. The ComboBox component is part of Telerik UI for Blazor, a professional grade UI library with 70+ native components for building modern and feature-rich applications. To try it out sign up for a free 30-day trial. The ComboBox component is part of Telerik UI for Blazor, a professional grade UI library with 70+ native components for building modern and feature-rich applications. To try it out sign up for a free 30-day trial. To use a Telerik ComboBox for Blazor - add the TelerikComboBoxtag - populate its Dataproperty with the collection of items you want in the dropdown - set the TextFieldand ValueFieldproperties to point to the corresponding names of the model - (optional) set the Valueproperty to the initial value of the model. - (optional) enable features like filtering and clear button Combobox data binding, two-way value binding and main features Selected value: @selectedValue <br /> <TelerikComboBox Data="@myComboData" TextField="MyTextField" ValueField="MyValueField" @ </TelerikComboBox> @code { IEnumerable<MyDdlModel> myComboData = Enumerable.Range(1, 20).Select(x => new MyDdlModel { MyTextField = "item " + x, MyValueField = x }); int selectedValue { get; set; } //Define a preselected value when the component initializes. Placeholder will not be shown as the selected value is defined. 
protected override void OnInitialized() { selectedValue = 3; } //in a real case, the model is usually in a separate file //the model type and value field type must be provided to the dropdpownlist public class MyDdlModel { public int MyValueField { get; set; } public string MyTextField { get; set; } } } The result from the code snippet above Component namespace and reference The ComboBox is a generic component and its type is determined by the type of the model you pass to it, and the type of its value field. You can find examples in the Data Bind - Considerations article. Features The ComboBox provides the following features: AllowCustom- whether the user can enter custom values. If enabled, the ValueFieldmust be a string. Class- the CSS class that will be rendered on the main wrapping element of the combobox. PopupClass- additional CSS class to customize the appearance of the ComboBox's dropdown. ClearButton- whether the user will have the option to clear the selected value. When it is clicked, the Valuewill be updated to default(TValue), so there must be no item in the Datathat has such a Value. For example, if TValueis int, clearing the value will lead to a 0 Value, so if there is an Item with 0in its ValueField- issues may arise with its selection. This feature can often go together with AllowCustom. Data- allows you to provide the data source. Required. Enabled- whether the component is enabled. Filterable- whether filtering is enabled for the end user. FilterOperator- the method of filtering the items. Defaults to StartsWith. Id- renders as the idattribute on the <input />element, so you can attach a <label for="">to the input. Placeholder- the text the user sees as a hint when no item is selected.. PopupHeight- the height of the expanded dropdown list element. PopupWidth- the width of the expanded dropdown list element. If you don't specify a value, the dropdown width will match the main element which can help with responsive layouts and 100% widths. 
TItem- the type of the model to which the component is bound. Required if you can't provide Dataor Value. Determines the type of the reference object. TValue- the type of the value field from the model to which the component is bound. Required if you can't provide Dataor Value. Determines the type of the reference object.. TabIndex- maps to the tabindexattribute of the HTML element. You can use it to customize the order in which the inputs in your form focus with the Tabkey. Templates - they allow you to control the rendering of items in the component. See the Templates article for more details. Validation - see the Input Validation article for more details. Selected Item By default, if no Value is provided, the ComboBox will appear empty, or will display the Placeholder defined. If a Value is provided and AllowCustom is not set to true, the Value should match an item in the data source (see more in the Value Out of Range section). The ComboBox will not always have a selected item, however, because it can act as an input. There will be no selected item in the following cases that depend on the settings of the component that the developer can control: - the user clears the value through the Clear button, - the user clears the value with Backspaceor Delkeys, AllowCustom="false"- when a custom value is typed, the ComboBox input value will be automatically cleared on the change event ( blurof the input or Enterkeypress). See the table below. AllowCustom="true"- when the user starts typing a custom value. Missing selection is most common when the initial value is null as data sources rarely have items with a null value, and/or when you want to let your users type in values that are not in your predefined set of options. If the user types text in the input, selection behaves according to the following table:
https://docs.telerik.com/blazor-ui/components/combobox/overview
CC-MAIN-2021-10
refinedweb
893
53.81
Mark All Tests as Async in pytest To run an async test in pytest, it must be marked. It can be a pain to mark them all individually. Here's how to mark them all async at once. Asynchronous tests You'd need an asynchronous test for asynchronous code under test. For instance, Quart is an app server where all the endpoints are asynchronous. If you were testing that Quart app, you might create a test like: from quart import Quart async def test_thing(): client = Quart("appname", static_folder=None).test_client() response = await client.get("/api/thing") assert response.status_code == 200 When running pytest, the use of async and await keywords will fail, reporting that a plugin is needed. Plugin The plugin that enables async testing in pytest is pytest-asyncio. To install: poetry add pytest-asyncio Or instead of poetry, use the package manager of your choice. Mark individually Now you'd be able to mark a test individually. In your test file, write: import pytest @pytest.mark.asyncio async def test_1(): # ... @pytest.mark.asyncio async def test_2(): # ... Mark all tests async But if you have many tests in a single module, this can be annoying to repeat the decorator. To mark all tests in the module async, you mark a special module-global variable called pytestmark. At the top of your test file add: import pytest pytestmark = pytest.mark.asyncio Now you have no more need to decorate every test function.
https://jaketrent.com/post/mark-all-pytests-async/
CC-MAIN-2022-40
refinedweb
242
76.52
From our sponsor: Market smarter with Mailchimp's automated messaging tools. In this tutorial we’re going to build a water-like effect with a bit of basic math, a canvas, and postprocessing. No fluid simulation, GPGPU, or any of that complicated stuff. We’re going to draw pretty circles in a canvas, and distort the scene with the result. We recommend that you get familiar with the basics of Three.js because we’ll omit some of the setup. But don’t worry, most of the tutorial will deal with good old JavaScript and the canvas API. Feel free to chime in if you don’t feel too confident on the Three.js parts. The effect is divided into two main parts: - Capturing and drawing the ripples to a canvas - Displacing the rendered scene with postprocessing Let’s start with updating and drawing the ripples since that’s what constitutes the core of the effect. Making the ripples The first idea that comes to mind is to use the current mouse position as a uniform and then simply displace the scene and call it a day. But that would mean only having one ripple that always remains at the mouse’s position. We want something more interesting, so we want many independent ripples moving at different positions. For that we’ll need to keep track of each one of them. We’re going to create a WaterTexture class to manage everything related to the ripples: - Capture every mouse movement as a new ripple in an array. - Draw the ripples to a canvas - Erase the ripples when their lifespan is over - Move the ripples using their initial momentum For now, let’s begin coding by creating our main App class. import { WaterTexture } from './WaterTexture'; class App{ constructor(){ this.waterTexture = new WaterTexture({ debug: true }); this.tick = this.tick.bind(this); this.init(); } init(){ this.tick(); } tick(){ this.waterTexture.update(); requestAnimationFrame(this.tick); } } const myApp = new App(); Let’s create our ripple manager WaterTexture with a teeny-tiny 64px canvas. 
export class WaterTexture{ constructor(options) { this.size = 64; this.radius = this.size * 0.1; this.width = this.height = this.size; if (options.debug) { this.width = window.innerWidth; this.height = window.innerHeight; this.radius = this.width * 0.05; } this.initTexture(); if(options.debug) document.body.append(this.canvas); } // Initialize our canvas initTexture() { this.canvas = document.createElement("canvas"); this.canvas.id = "WaterTexture"; this.canvas.width = this.width; this.canvas.height = this.height; this.ctx = this.canvas.getContext("2d"); this.clear(); } clear() { this.ctx.fillStyle = "black"; this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height); } update(){} } Note that for development purposes there is a debug option to mount the canvas to the DOM and give it a bigger size. In the end result we won’t be using this option. Now we can go ahead and start adding some of the logic to make our ripples work: - On constructor()add this.pointsarray to keep all our ripples this.radiusfor the max-radius of a ripple this.maxAgefor the max-age of a ripple - On Update(), - clear the canvas - sing happy birthday to each ripple, and remove those older than this.maxAge - draw each ripple - Create AddPoint(), which is going to take a normalized position and add a new point to the array. class WaterTexture(){ constructor(){ this.size = 64; this.radius = this.size * 0.1; this.points = []; this.maxAge = 64; ... } ... addPoint(point){ this.points.push({ x: point.x, y: point.y, age: 0 }); } update(){ this.clear(); this.points.forEach(point => { point.age += 1; if(point.age > this.maxAge){ this.points.splice(i, 1); } }) this.points.forEach(point => { this.drawPoint(point); }) } } Note that AddPoint() receives normalized values, from 0 to 1. If the canvas happens to resize, we can use the normalized points to draw using the correct size. Let’s create drawPoint(point) to start drawing the ripples: Convert the normalized point coordinates into canvas coordinates. 
Then, draw a happy little circle: class WaterTexture(){ ... drawPoint(point) { // Convert normalized position into canvas coordinates let pos = { x: point.x * this.width, y: point.y * this.height } const radius = this.radius; this.ctx.beginPath(); this.ctx.arc(pos.x, pos.y, radius, 0, Math.PI * 2); this.ctx.fill(); } } For our ripples to have a strong push at the center and a weak force at the edges, we’ll make our circle a Radial Gradient, which looses transparency as it moves to the edges. Radial Gradients create a dithering-like effect when a lot of them overlap. It looks stylish but not as smooth as what we want it to look like. To make our ripples smooth, we’ll use the circle’s shadow instead of using the circle itself. Shadows give us the gradient-like result without the dithering-like effect. The difference is in the way shadows are painted to the canvas. Since we only want to see the shadow and not the flat-colored circle, we’ll give the shadow a high offset. And we’ll move the circle in the opposite direction. As the ripple gets older, we’ll reduce it’s opacity until it disappears: export class WaterTexture(){ ... drawPoint(point) { ... const ctx = this.ctx; // Lower the opacity as it gets older let intensity = 1.; intensity = 1. - point.age / this.maxAge; let color = "255,255,255"; let offset = this.width * 5.; // 1. Give the shadow a high offset. ctx.shadowOffsetX = offset; ctx.shadowOffsetY = offset; ctx.shadowBlur = radius * 1; ctx.shadowColor = `rgba(${color},${0.2 * intensity})`; this.ctx.beginPath(); this.ctx.fillStyle = "rgba(255,0,0,1)"; // 2. Move the circle to the other direction of the offset this.ctx.arc(pos.x - offset, pos.y - offset, radius, 0, Math.PI * 2); this.ctx.fill(); } } To introduce interactivity, we’ll add the mousemove event listener to app class and send the normalized mouse position to WaterTexture. import { WaterTexture } from './WaterTexture'; class App { ... 
init(){ window.addEventListener('mousemove', this.onMouseMove.bind(this)); this.tick(); } onMouseMove(ev){ const point = { x: ev.clientX/ window.innerWidth, y: ev.clientY/ window.innerHeight, } this.waterTexture.addPoint(point); } } Great, now we’ve created a disappearing trail of ripples. Now, let’s give them some momentum! Momentum To give momentum to a ripple, we need its direction and force. Whenever we create a new ripple, we’ll compare its position with the last ripple. Then we’ll calculate its unit vector and force. On every update, we’ll update the ripples’ positions with their unit vector and position. And as they get older we’ll move them slower and slower until they retire or go live on a farm. Whatever happens first. export lass WaterTexture{ ... constructor(){ ... this.last = null; } addPoint(point){ let force = 0; let vx = 0; let vy = 0; const last = this.last; if(last){ const relativeX = point.x - last.x; const relativeY = point.y - last.y; // Distance formula const distanceSquared = relativeX * relativeX + relativeY * relativeY; const distance = Math.sqrt(distanceSquared); // Calculate Unit Vector vx = relativeX / distance; vy = relativeY / distance; force = Math.min(distanceSquared * 10000,1.); } this.last = { x: point.x, y: point.y } this.points.push({ x: point.x, y: point.y, age: 0, force, vx, vy }); } update(){ this.clear(); let agePart = 1. / this.maxAge; this.points.forEach((point,i) => { let slowAsOlder = (1.- point.age / this.maxAge) let force = point.force * agePart * slowAsOlder; point.x += point.vx * force; point.y += point.vy * force; point.age += 1; if(point.age > this.maxAge){ this.points.splice(i, 1); } }) this.points.forEach(point => { this.drawPoint(point); }) } } Note that instead of using the last ripple in the array, we use a dedicated this.last. This way, our ripples always have a point of reference to calculate their force and unit vector. Let’s fine-tune the intensity with some easings. 
Instead of just decreasing until it’s removed, we’ll make it increase at the start and then decrease: const easeOutSine = (t, b, c, d) => { return c * Math.sin((t / d) * (Math.PI / 2)) + b; }; const easeOutQuad = (t, b, c, d) => { t /= d; return -c * t * (t - 2) + b; }; export class WaterTexture(){ drawPoint(point){ ... let intensity = 1.; if (point.age < this.maxAge * 0.3) { intensity = easeOutSine(point.age / (this.maxAge * 0.3), 0, 1, 1); } else { intensity = easeOutQuad( 1 - (point.age - this.maxAge * 0.3) / (this.maxAge * 0.7), 0, 1, 1 ); } intensity *= point.force; ... } } Now we're finished with creating and updating the ripples. It's looking amazing. But how do we use what we have painted to the canvas to distort our final scene? Canvas as a texture Let's use the canvas as a texture, hence the name WaterTexture. We are going to draw our ripples on the canvas, and use it as a texture in a postprocessing shader. First, let's make a texture using our canvas and refresh/update that texture at the end of every update: import * as THREE from 'three' class WaterTexture(){ initTexture(){ ... this.texture = new THREE.Texture(this.canvas); } update(){ ... this.texture.needsUpdate = true; } } By creating a texture of our canvas, we can sample our canvas like we would with any other texture. But how is this useful to us? Our ripples are just white spots on the canvas. In the distortion shader, we're going to need the direction and intensity of the distortion for each pixel. If you recall, we already have the direction and force of each ripple. But how do we communicate that to the shader? Encoding data in the color channels Instead of thinking of the canvas as a place where we draw happy little clouds, we are going to think about the canvas' color channels as places to store our data and read them later on our vertex shader. In the Red and Green channels, we'll store the unit vector of the ripple. In the Blue channel, we'll store the intensity of the ripple. 
Since RGB channels range from 0 to 255, we need to send our data that range to normalize it. So, we'll transform the unit vector range (-1 to 1) and the intensity range (0 to 1) into 0 to 255. class WaterEffect { drawPoint(point){ ... // Insert data to color channels // RG = Unit vector let red = ((point.vx + 1) / 2) * 255; let green = ((point.vy + 1) / 2) * 255; // B = Unit vector let blue = intensity * 255; let color = `${red}, ${green}, ${blue}`; let offset = this.size * 5; ctx.shadowOffsetX = offset; ctx.shadowOffsetY = offset; ctx.shadowBlur = radius * 1; ctx.shadowColor = `rgba(${color},${0.2 * intensity})`; this.ctx.beginPath(); this.ctx.fillStyle = "rgba(255,0,0,1)"; this.ctx.arc(pos.x - offset, pos.y - offset, radius, 0, Math.PI * 2); this.ctx.fill(); } } Note: Remember how we painted the canvas black? When our shader reads that pixel, it's going to apply a distortion of 0, only distorting where our ripples are painting. Look at the pretty color our beautiful data gives the ripples now! With that, we're finished with the ripples. Next, we'll create our scene and apply the distortion to the result. Creating a basic Three.js scene For this effect, it doesn't matter what we render. So, we'll only have a single plane to showcase the effect. But feel free to create an awesome-looking scene and share it with us in the comments! Since we're done with WaterTexture, don't forget to turn the debug option to false. 
import * as THREE from "three"; import { WaterTexture } from './WaterTexture'; class App { constructor(){ this.waterTexture = new WaterTexture({ debug: false }); this.renderer = new THREE.WebGLRenderer({ antialias: false }); this.renderer.setSize(window.innerWidth, window.innerHeight); this.renderer.setPixelRatio(window.devicePixelRatio); document.body.append(this.renderer.domElement); this.camera = new THREE.PerspectiveCamera( 45, window.innerWidth / window.innerHeight, 0.1, 10000 ); this.camera.position.z = 50; this.touchTexture = new TouchTexture(); this.tick = this.tick.bind(this); this.onMouseMove = this.onMouseMove.bind(this); this.init(); } addPlane(){ let geometry = new THREE.PlaneBufferGeometry(5,5,1,1); let material = new THREE.MeshNormalMaterial(); let mesh = new THREE.Mesh(geometry, material); window.addEventListener("mousemove", this.onMouseMove); this.scene.add(mesh); } init(){ this.addPlane(); this.tick(); } render(){ this.renderer.render(this.scene, this.camera); } tick(){ this.render(); this.waterTexture.update(); requrestAnimationFrame(this.tick); } } Applying the distortion to the rendered scene We are going to use postprocessing to apply the water-like effect to our render. Postprocessing allows you to add effects or filters after (post) your scene is rendered (processing). Like any kind of image effect or filter you might see on snapchat or Instagram, there is a lot of cool stuff you can do with postprocessing. For our case, we'll render our scene normally with a RenderPass, and apply the effect on top of it with a custom EffectPass. Let's render our scene with postprocessing's EffectComposer instead of the Three.js renderer. Note that EffectComposer works by going through its passes on each render. It doesn't render anything unless it has a pass for it. We need to add the render of our scene using a RenderPass: import { EffectComposer, RenderPass } from 'postprocessing' class App{ constructor(){ ... 
this.composer = new EffectComposer(this.renderer); this.clock = new THREE.Clock(); ... } initComposer(){ const renderPass = new RenderPass(this.scene, this.camera); this.composer.addPass(renderPass); } init(){ this.initComposer(); ... } render(){ this.composer.render(this.clock.getDelta()); } } Things should look about the same. But now we start adding custom postprocessing effects. We are going to create the WaterEffect class that extends postprocessing's Effect. It is going to receive the canvas texture in the constructor and make it a uniform in its fragment shader. In the fragment shader, we'll distort the UVs using postprocessing's function mainUv using our canvas texture. Postprocessing is then going to take these UVs and sample our regular scene distorted. Although we'll only use postprocessing's mainUv function, there are a lot of interesting functions you can use. I recommend you check out the wiki for more information! Since we already have the unit vector and intensity, we only need to multiply them together. But since the texture values are normalized we need to convert our unit vector from a range of 1 to 0, into a range of -1 to 0: import * as THREE from "three"; import { Effect } from "postprocessing"; export class WaterEffect extends Effect { constructor(texture) { super("WaterEffect", fragment, { uniforms: new Map([["uTexture", new THREE.Uniform(texture)]]) }); } } export default WaterEffect; const fragment = ` uniform sampler2D uTexture; #define PI 3.14159265359 void mainUv(inout vec2 uv) { vec4 tex = texture2D(uTexture, uv); // Convert normalized values into regular unit vector float vx = -(tex.r *2. - 1.); float vy = -(tex.g *2. - 1.); // Normalized intensity works just fine for intensity float intensity = tex.b; float maxAmplitude = 0.2; uv.x += vx * intensity * maxAmplitude; uv.y += vy * intensity * maxAmplitude; } `; We'll then instantiate WaterEffect with our canvas texture and add it as an EffectPass after our RenderPass. 
Then we'll make sure our composer only renders the last effect to the screen: import { WaterEffect } from './WaterEffect' import { EffectPass } from 'postprocessing' class App{ ... initComposer() { const renderPass = new RenderPass(this.scene, this.camera); this.waterEffect = new WaterEffect( this.touchTexture.texture); const waterPass = new EffectPass(this.camera, this.waterEffect); renderPass.renderToScreen = false; waterPass.renderToScreen = true; this.composer.addPass(renderPass); this.composer.addPass(waterPass); } } And here we have the final result! An awesome and fun effect to play with! Conclusion Through this article, we've created ripples, encoded their data into the color channels and used it in a postprocessing effect to distort our render. That's a lot of complicated-sounding words! Great work, pat yourself on the back or reach out on Twitter and I'll do it for you 🙂 But there's still a lot more to explore: - Drawing the ripples with a hollow circle - Giving the ripples an actual radial-gradient - Expanding the ripples as they get older - Or using the canvas as a texture technique to create interactive particles as in Bruno's article. We hope you enjoyed this tutorial and had a fun time making ripples. If you have any questions, don't hesitate to comment below or on Twitter! Stunning work, i appreciate that! Thanks for the experience share
https://tympanus.net/codrops/2019/10/08/creating-a-water-like-distortion-effect-with-three-js/?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+tympanus+%28Codrops%29
CC-MAIN-2020-10
refinedweb
2,678
52.66
Since Any plans on porting XNA to Windows 8? This is a very cool feature, one that I've wanted for desktop applications for as long as I've been using WPF. What are the chances that the ability to use XAML with native code and DirectX will show up on the desktop application development side of the house? Or is the desktop application development platform just not going to see significant enhancements going forward? Thanks, Eric I've been trying to create a C++/DirectX custom control, that I can consume from a C#/XAML app, but so far with no luck. Could you please post a sample that does this? You can keep it as simple as a possible in the DirectX as you want (since this sample is not about doing advanced rendering but how the stuff interplays), and just focus on having a C++ control render something simple to a SurfaceImageSource, and then the C# app consumes this control in a XAML page – perhaps with a simple binding flowing back to the C++ layer. Another thing I'd like to see covered is doing this so it works on x86, x64 and ARM. The dev story for this as far as I can tell is slightly confusing for the average .NET dev, since they usually use AnyCPU. OMG. Insanely awesome. You've made a small but crucial core of XD professionals ridiculously happy. DID YOU NOT GET THE **********RESOUNDING****** FEEDBACK THAT WE NEED XNA FOR METRO? I learnt XNA (and am pretty clueless about graphics engines otherwise) and own a very popular windows phone game that is ranked in the top 20 apps. I don't have the budget/time resources to now go learn directX. There are 100s of developers like me. Dont you want their games on Windows 8? Have you seen how many games ipad has? How about at least telling us one way or another if there will be future support for XNA+Metro. Just say yes or no. So other people can about making plans around yours. Thank you. 
I've been hacking at getting the SwapChainBackgroundPanel working with SharpDx and used in a XAML/C# app … i'm very close to getting it working BUT have had a brilliant experience so far.. I love the 3 approachs you guys have taken and look forward to seeing how this pushes the state of LOB and Metro-XAML apps 🙂 🙂 I agree with RisingStartDevsNEEDxna, I've spent a lot of time learning XNA and I'm having a blast building games in it. please port this over to Win8 or at least give us something similar. I really don't want to go and learn C++ 5 years ago: "yeah, XNA is great, learn it, you'll use for create great games for Windows AND for Windows Phone. It's a GREAT opportunity for you" Septembre 2011: XNA is dead (RIP) Now : "C++ is great for Metro, learn it. It's a GREAT opportunity for you" Future : ??? So please: give us back XNA. I developed few XNA games for WP7, and I was hoping that with Consumer Preview we will get XNA framework for Windows 8, but since nothing happened and Microsoft is constantly silent about it, last week I bought Mac, iPad and MonoTouch licence, and started to migrate my games to iOS. Sadly, I cannot believe that your product managers did not see huge potential of XNA indie developers and you didn't implement XNA for Metro. When Windows Store opens this year there will be probably lot of Metro apps on it, but relatively small number of games because of such decision. I invested lot of energy and time in XNA and my custom game fremework, and I do not want to switch back to C++ or HTML5/JS. Simply I do not have time for that. I know that in some moment community will probably release some kind of XNA wrapper for WinRT, but frankly Microsoft doesn't deserve it. Just give us XNA back. Thats the feedback you should be listening too. Yes please! XNA 5 for windows 8 with directx 11 and Metro support. Absolutely Fabulous news!!! Very, Very happy and excited to see this has been done. 
This is a huge step in opening the road to top level 3D-graphics and 3D-audio, for even "small" devices. Thank you so very much for listening! Unfortunately, Microsoft seems to have decided that they are somehow better off not being up-front about the future of the various development platforms. "Look, here is this shiny new development framework, don't you love it!?!?!" Meanwhile. we've been building our businesses around technologies that were the shiny new framework 6 years ago, and we can't seem to get a peep out of Microsoft about what the future of those technologies is. We need clarity, and Microsoft is determined to provide anything but. Eric Please, please support XNA with C# on Windows 8 Metro. I do not want to go back to C++ and DirectX. You support XNA with C# on Windows Phone and XBox, please do so for Windows 8 Metro. I hope Windows 8 Metro succeeds, and I want to go along for the ride. Please give us developers the tools to do this. With no clear direction on XNA for WinRT we are now looking MonoTouch Game development for Android and iOS. After shafting the XNA community like this, Microsoft does not deserve the attention of Indie Game Developers to fill it Marketplace. I will not be taking a step backwards to use C++ for game development. I've stated repeatedly over the years that the only way I'd use C++ again is if someone like Bioware or Blizzard hired me. There's just no reason for an indie to use C++ for game development when an awesome tool like XNA exists. I'll just be making desktop games for Win 8 non-ARM devices I guess. That's fine with me. Folks requesting XNA should check out the sharpdx project. It will allow you to write Metro DirectX apps with C#. Not XNA obviously, but you should at least be able to reuse a lot of code. All this xaml and DX stuff is AWSOME but please PLEEEAASE enable it for desktop apps as well, not just Metro apps! 
we could really really use this for some apps but they also need the multitasking, multi window support of desktop apps! As for XNA, relax guys, once we have these native interfaces a managed layer can be built intop of that. SlimDx already allows you to get an IntPtr to the swapchain so you could pass that in to these elements and do all your development in c# 😀 This is very sad indeed. I don't want to give up all of the fantastic tools and the excellent content pipeline in XNA to go back to C++, write everything your self and good luck managing memory. Going to a new platform with new features is one thing; going backwards like this is just ludicrous. Seriously, who's driving this boat? My vote for XNA with its ease (C#) and platform embracing strategy. Please drop a single line on that subject / status. Even if not commenting on XNA itself, I would really appreciate to know more about the "why not commenting?". One last note in addition to last comment: With C# you've done such a great job developing a language so powerful, clean and fluently to use like no other language. Congratulations for that! In my opinion you could even knock out Google and Apple by buying and smoothly integrating Mono / Xamarin. Immediate win on the coolness factor – long term win on the dev community. I mean how cool is that: Embracing Windows, Xbox, WinPhone, iOS, Android, Linux, everything with one language / runtime. Just give it some thought – maybe not winning each single space like phones, but winning on the overall tools and the devs. I agree with small_mountain_0705 above. We need this functionality in desktop apps – either in WPF or allow WinRT/XAML-based desktop apps. And another vote for small_mountain_0705 — this should be in WPF. I realize you've been spending time tooling up for Metro. Please spend some time allowing those of us who are entrenched in WPF to HAVE a successful future as well. Add my vote for adding this to WPF. 
We didn't even get the airspace fix for WPF that we've been begging for for ages–it was removed between the DP and CP! I am also semi-depressed about the lack of XNA. I have put a lot of effort into some top notch games for WP7 and it seems like we are getting screwed here. We all need to make some big decisions about whether we are going to stay with the windows platform or not unless they decide to support XNA. +1 for official GPU-programming support directly from C#, please. The lack of this makes development for Metro feel like a huge leap backwards for several developers. The Playstation Suite SDK by SONY can live off supporting only C#, one would think MICROSOFT at least adds the same level of support for C# as they do for C++. XNA has one fundamental problem however. It is kind of hard-coded for DirectX 9. Make something for Metro that is capable of keeping up with constant evolution of GPUs, but is similar to XNA. Heaven forbid, take C# into consideration while designing the future version of DirectX… Comment attempt 4 There is a need for managed game development framework. It may not be backward compatible with XNA but I will be damned before I learn C++ to try out game ideas. BTW is there some centralized place where the "BRING BACK XNA!!!1!" rage is concentrated so I can join? I second that a managed 3D development framework is a must. I own a product (3D terrain viewer), where I need 3D, but I do not need to squeeze out every fps, nor all the latest shaders bells and whistles. I did that piece in C# with the already obsolete Managed DX, and I was successful to pull this out in a few months. Should have I done it solely in C++, which I have only modest knowledge of, that would take me much more time. Microsoft, you really need to give us something similar to XNA. There is no need for it to be fully compatible, but should be close enough to let developers migrate easily. And be quick about it. 
I can see everybody here, like me, is complaining about not having XNA on W8! 🙁 For everyone who thinks this should be supported in C# as well: visualstudio.uservoice.com/…/2730068-greatly-increase-support-for-gpu-programming-in-c- Personally I'm delighted to find that native C++ is once again a first-class citizen in the Windows development ecosystem. SwapChainBackgroundPanel is exactly what I need to bring my skillset directly to Metro and the app store. Bravo. My question is, how will this work on Windows 8 tablet/phone? For anybody else who wants to vote on getting SwapChainBackgroundPanel et al into WPF: visualstudio.uservoice.com/…/2644759-provide-better-directx-support-in-wpf I'm using SharpDX in my Win8 metro project. 100% C#, full hardware acceleration, pixel shader support, it's great. It's not XNA: it's basically using the DirectX api directly but in C#. There's an alpha-stage project called "ANX" that wraps SharpDX to create a "source compatible" framework. Quote: "Source compatibility means, that you can "translate" a XNA game project to be a ANX game project by simply replacing all namespaces from Microsoft.XNA.Framework to ANX.Framework. I can't speak to the accuracy of that statement, but I can speak to the utter awesomeness of SharpDX.
https://blogs.msdn.microsoft.com/windowsappdev/2012/03/15/combining-xaml-and-directx/
CC-MAIN-2017-09
refinedweb
1,995
71.55
edu/sts Python Classroom Course Manual Updated: 08-28-2013 Table of Contents 1.0 Introduction4 1.1 About this Class4 1.2 Topics4 1.3 Required Skills4 1.4 Accessibility4 2.0 Introduction to Python 5 2.1 Introduction to Python5 2.2 Python Features5 2.3 The Python Interpreter6 2.4 Getting Help with Python6 3.0 Simple Data Types7 3.1 Variables7 3.2 Strings7 3.2.1 Exercise: Creating Strings 7 4.2 Tuples 20 4.2.1 Exercise: Creating and Indexing a Tuple20 4.3 Dictionaries 21 4.3.1 Exercise: Creating and Modifying Dictionaries21 4.3.2 Dictionary Methods22 5.0 Functions 23 5.1 Creating User-Defined Functions 24 5.1.1 Exercise: Creating a Function 24 1.0 Introduction 1.1 About this Class This course has been designed to give an introduction to the Python programming language. Python is a dynamic programming language that is used in a variety of application domains. It is a language that can be compared to Java, Perl, Ruby, and a few others. This manual will include explanatory text along with code examples that you should be typing to follow along. Any text you should be coding will be indicated with this font single space font. 1.2 Topics The topics for this course will cover the basics of Python and will provide allow you to further develop your Python skills. The topics for this course include: Introduction to Python Simple and Sequence Data types Functions Control Structures Standard Library Scripts 1.4 Accessibility Python runs on many different computing platforms: Windows, Mac, and many brands of Unix. Python can be downloaded and installed directly onto your machine from. Python comes pre-installed on Mac machines and can be accessed through the terminal. Python version 2.7.x Page 4 Page 5 Windows users will need to install the Python 2.7 interpreter onto their machine through the following link:. Follow the installation instructions and the Python interpreter should install successfully on your machine. Open up the Python Interpreter on your machine. 
The Python interpreter is an invaluable tool for acclimating new users to the language. Because we are using Python as a scripting language, anything that works in the interpreter will work in a script. Conversely, scripts can be ran entirely from the interpreter, as the interpreter maintains state, allowing you to set variables and access them from the interpreter using later commands. Page 6 3.2 Strings Strings in Python are unique because they are immutable. This means that the string cannot not modified directly. Many mutable data structures provide methods to allow you to dynamically change values of a given variable or data structure. With immutable data types, the developer must create a new instance of the data type (in this case a string) and assign the variable to the new data structure. This may sound complicated, but in practice it is very easy to create new strings by combining other strings. Simply remember that string methods will not directly modify the contents of a variable. Strings are defined in Python by using pairs of single quotes text or double quotes text. Multi-line string statements are defined using pairs of 3 quotes, single or double: This is a Multi-line statement in Python Page 7 Congratulations! You have typed your first string in Python. Strings in Python are easily combined and these combinations can be modified using comma and the plus (+) operator. Type the following in the Python interpreter and press return after each: print Hello, World! print Hello + World! The common operator will put a space between the strings and the plus (+) operator will not include a space when the strings are combined. We have created some simple strings, but these are not very helpful by themselves. String methods will allow us to modify variable strings. The first two methods that will be introduced are str.upper()and str.capitalize(). Note that str is a placeholder for the name of your string. 
The upper method will take the string and return all letters as uppercase. The capitalize method will make the first letter of the string capitalized and the rest of the letters lowercase. Note that since these are just returned values, the string itself is immutable and will maintain the same value unless it is redefined. Lets practice using some string methods. Page 8 Since the string s is immutable, that means the string methods are not making changes to s. They are simply returning modified values of the string. Lets say that we want to modify the values of s so that all the letters in the string are uppercase. Type the following code into the interpreter: s = s.upper(). There will not be a return value for this code. Enter s and press return to see the changes to the string. Page 9 The first line of code just returns the original value of the string because the replace method is case sensitive. The returned values are not permanent; the string s would need to be redefined as done previously to retain any of the changes from the replace method. The principles of string indexes carry over to other data types like lists, dictionaries, and tuples which will be discussed later in this course. Page 10 By omitting index values for this syntax, the returned value will start with the 0th index and end with the last index of the string. Similarly, the second line of code begins with the 3rd index ending with the last index of the string. The third line begins with the 0th index and ends with the 7th index. This is because the j index is non-inclusive. The final line of code returns a value that starts with the 3rd index and ends with the 7th index. The returned results from both methods are the same. This is because we switched the index values in both sets of brackets as well as in the format method. You can include variable or string names within the format method for additional flexibility. {name} pities the fool and so do {0}.format(you, name=Mr. 
T) Page 11 The output from these lines of code seem to be correct. However, the last line gives us a different value than we expected. This is because we are dividing an integer (42) by another integer (5). By dividing an integer by another integer, the output will be another integer. The resulting integer will not contain any decimals. The decimal values are truncated off of the result. While less concerning, multiplying an integer by an integer will also result in an integer. We need to multiply or divide by floats to Page 12 The resulting print statements are each a floating point number that includes a decimal. This is because floating point numbers were used in each of the three statements. It is important to remember that to get an output of a floating point, one of the inputs for a mathematical expression should be a floating point. In other programming languages, such as Java, integer variables can be incremented using the ++ operand. This is not the case in Python. If you want to increment a variable in Python, you will need to use the += value operand. The value will be added to x. Think of it as x equals itself plus the value This will be more important once you begin to write your own Python scripts. Type the following code to demonstrate: x=5 print x Page 13 Page 14 The third line of code tells Python to import all functions from the math module into the interpreter. The * is a wildcard and represents all functions. Specific functions could be imported by replacing the * with the function name. The advantage of importing math functions is so that you do not need to prepend math. To the front of the function call. Notice that in both cases of our square root calls, the outputs were floats. In general, math module functions will return floats. This is also the case for the ceiling and floor functions. 
Lets try them out: ceil(4.2) floor(4.2) If you did not import the entire math library with the *, you would need to type math.ceil(4.2) and math.floor(4.2) respectively. You can also work with trigonometry functions in both degrees and radians in Python. These functions consider radians as the default, but its easy to switch between the two. If you recall, 1 radian is equal to 180 degrees. The function math.sin(x) will return the sine of x radians. There are additional functions for the other trigonometric operations. The function math.degrees(x) will convert angle x from radians to degrees. Similarly, math.radians(x) will convert angle x from degrees to radians. Since the math module is imported in our Python interpreter, we can omit math. from the beginning of the functions. sin(1) degrees(1) radians (180) degrees(sin(1)) Page 15 4.1 Lists The List is one of the most flexible data types in Python. A list in Python is a container that holds a number of other objects in a given order. The unique aspect of lists in Python is that the data types within the list can vary from element to element. In other programming languages, such as Java, list elements all need to be the same data type. The additional flexibility makes Python a very powerful and dynamic language. We are going to create an example list, called items that contains an integer, a float, and some strings. After we create this list, we will introduce some additional methods to demonstrate how to manipulate lists. The syntax for creating a list is variable_name = [ 1st element, 2nd element, 3rd element, .. ]. Lets create our first list in the interpreter: items = [1, 2.0, three, four] print items Note that our list includes both an integer and a float, as well as strings that were defined with and . We can see that there is a lot of flexibility that comes along with lists. You can even nest lists inside of lists to create 2D and 3D arrays, but that is outside the scope of this course. 
Page 16 Lists can also be used like a stack by using the append method to push items on to the list and the pop method to remove items. You can leave the argument for the pop function blank and it will remove the item at the last index of the list. Otherwise, you can define an index for the element to be removed. The function returns the value that is removed from the list, which could then be stored as a variable if desired. Type the following lines of code to see how to use the pop method. items.pop() print items items.pop(2) print items The pop method returns the value that is removed from the list. This returned value can be set to a variable, which applies to any method that returns a value. To demonstrate, we are going to create an integer variable that will be assigned the value of a popped listed element. z = items.pop() Page 17 By calling items[0] and items[2] we have returned the 0th and 2nd indexes of the list items. We can return portions of the list by type items[i:j] as well as some other variations. print items[1:3] print items[0:3] The list items contains 4 elements and is therefore indexed to 3. Remember, the j index is not included in the return value of items[i:j]. It goes up until index j. In our case, if we wanted to Page 18 We can see the issue that was described with the max method. The string four was considered a higher value than the last element 5.0. Lets pop off the string element and use the max function to return the highest value in the items list. items.pop(2) print max(items) Page 19 4.2 Tuples Another sequence data type in Python are tuples. Tuples are similar to lists in the fact that they can hold multiple data types; however, there is one major difference. Tuples are immutable which means that they cannot be modified. This is drastically different than lists, which have multiple methods to modify elements. While there isnt a way to modify this tuple, you can still return indexes just like lists. 
Returning indexes for tuples uses the same syntax for returning list indexes. print itemsFrozen[1:4] print itemsFrozen[1:] print itemsFrozen[:3] print itemsFrozen[:] Page 20 4.3 Dictionaries A Python dictionary is a container of key-value pairs. The dictionary will store a value with an associated key, and there can be many key-value pairs for a given dictionary. Dictionaries differ from lists or tuples because the keys will be called to return a value rather than an index. Dictionaries can be updated and key-value pairs can be added as well. The value of a key can be updated by calling the key and resetting the value. This same syntax is used to create a new key-value pair to be added to the dictionary. nick[age] = 21 print nick[age] nick[school] = UW-Madison print nick[school] print nick Page 21 Page 22 5.0 Functions A function is a block of organized, reusable code that is used to perform a single, related action. Functions provide better modularity for your application and a high degree of code reuse. As you know, Python has a number of built-in functions like print(), but you can also create your own functions which are called user-defined functions. Page 23 This section of the manual was creating using the Windows version of IDLE, so your current working directory may be different. To change your Python working directory please refer to Python documentation available online. The current working directory is where you will want to save your Python modules so that they can be imported by the interpreter. We are going to create our first function inside of the new window. Before we create a new function, there are rules that need to be followed for defining functions: Function blocks begin with the keyword def following the function name (ex: functionName) and parentheses () Any input parameters should be placed within these parentheses and separate by commas. 
You can also define parameters inside of these parentheses The code block for every function starts with a colon (:) and is indented Page 24 print arg 1 print arg 2 Save this function as sts.py inside of the current working directory of Python. For the example outlined in this manual, that would be inside of the C:\\Python27\ directory. Now that the module has been saved, it is time to import the module inside of the Python interpreter and call the function. You must use the following syntax to correctly call the function inside of the module: moduleName.functionName(arguments). Inside of the interpreter Type the following code: import sts sts.text(hello, world) You have created and ran your first user-defined function in Python. You could pass any data type into this function such as integers, floats, arrays, etc and the function would print those arguments. The return statement has been omitted from our function because there are no values that we need to return. Page 25 This can be avoided by including default arguments in the function definition. This allows you to include less parameters in the function call and the default parameter will be used. Additionally, you can overwrite the default parameter as you would normally call the function. print arg 1 print arg 2 Save this file once you have typed this block of code. Python only allows you to import a module once in the interpreter, so you will need to reload the file using the reload() function.. Type reload(sts)in the interpreter and then we can call the new function. Type these lines of code into the interpreter after you have reloaded the module: sts.text2(hello) sts.text2(hello, world) You can see that default arguments can be overwritten if needed. Otherwise default arguments help shorten the function calls. This can be very useful with more complex functions. Page 26 Python knows similar control structures as other programming languages. 
Control statements include if statements, else-if statements, else statements, for loops, and while loops. This section will outline examples of these control structures in Python modules. else: Save this file as control.py in the current working directory. You can run this module as a script by choosing Run > Run Module in the control.py window. The syntax for an if statement is if followed by an expression to check, such as number>15 and ended with a colon (:). The block of code to be executed by the if statement should be indented with one Page 27 You should see the output regarding the number four. The syntax for an else-if statement is elif followed by an expression to check and ended with a colon (:). The block of code to be executed by the else-if statement should be indented underneath the elif block. Page 28 This for loop demonstrates a number of things. First, the item defined in the for loop is used to reference the sequence of information. Each time the heading line executes, it implicitly assigns a new value to the item (in this case i). After each execution of the heading line, the statements in the indented block are executed, generally using the new value for the item assigned in the heading. The sequence can be a list of items or values. To demonstrate this we will create another for loop with the range() function in the Python interpreter: name=nick for i in range(len(name)): print name[i] <enter> The range function generates a list containing the following values: [0,1,2,3]. Since the length of nick is 4, the range function creates a list with 4 elements indexed at 0. The loop then prints off each element in the variable name. Another type of loop that is commonly used in Python is the while loop.The while loop requires initialization of a variable. The while loop will run until a condition is met for the initialized variable, executing lines of indented code after each iteration. 
The syntax for a while loop is while condition is true: followed by lines of indented code to execute. Type the following lines of code into the Python interpreter to create a while loop: x = 1 while x<=10 print x x+=1 <enter> Page 29 This while loop prints the values 1 through 10. The += operator is used to iterate through the while loop. One way to think of the statement x+=1 is x equals itself plus one. Python does not support the ++ operator to iterate by one like other programming languages. This allows you to call functions from the math module within the Python interpreter or script. Alternatively you could import all of the math functions with the following: from math import *. This tells Python to import all of the math module functions so that you can call them directly without prepending math. to the front of the function call. The asterisk * is a wildcard operator, similar to searches in Google or a database. You can also import specific math functions such as from math import pow: Page 30 The advantage to importing specific functions is that it allows your modules to run faster. This makes a difference in large programs; however, it will likely be negligible for your first programs. This function produces a random number from 0.0 up to but not including 1.0. This has a number of uses in programming. Another useful function is the uniform(a,b) which produces a random number from a to b. The end value b may or may not be included based on rounding of the equation a + (b-a) * random(). random.uniform(5,50) random.uniform(0,10) random.uniform(-5,-1) Page 31 The random module has a number of other functions that are further detailed in Pythons online documentation. 
Page 32 open_new() Page 33 confirmation = confirmation.lower() if confirmation == 'y': file.write(text_answer) file.close() def dump_file(): file.close() Page 34 if text_answer == None: else: print """ 1: Enter in input 2: Write input to file 3: Dump file contents 4: exit program """ The first line is a comment describing this portion of the code. Comment lines are added using the # symbol. It is always good practice to comment your code so that another user can follow your thought process. The next line begins a while loop with while 1:, which is the same as while true:. This means that the while loop will continue to run until a break statement is encountered. The next block of code is an if and else statement pair that prints off the currently stored text value. The following block of code reads the 4 options to the user. Enter the next block of code into the script.py file: # TAKE USER INPUT answer = int(answer) if answer == 4: elif answer == 1: break text_answer = take_input() elif answer == 2: Page 35 else: write_file(text_answer) elif answer == 3: dump_file() else: continue The first three lines of code for this block is going to take the raw input from the user, either 1, 2, 3, 4 as a string and then cast it into an integer. The remaining lines include an if statement with else-if conditions and a final else statement to catch the remaining inputs. The first if statement checks to see if the user entered a value of 4. If so, the break statement will close the while loop and end the script. The next line is an else-if statement that checks if the user entered a value of 1 and calls the take_ input() function and sets the value to the text_answer variable that we initialized at the beginning of the script. The next else-if statement checks for an input of 2. An if else statement pair is nested inside of the elif statement to check and see that there is text to write to the file. 
The elif statement will call the write_text() function and write the text_answer to a text document named storage.txt. The last else-if statement checks for a user input of 3. It will call the dump_file() function, which will read the file contents and print it off in the Python interpreter. The block of code ends with an else statement that tells the user to enter a valid value. The continue statement will tell the while loop to continue after the else statement tells the user that they chose an invalid option. Your final script should look like this: Page 36 Save your script.py file and then we are ready to run the script. In the Python interpreter enter import script and see your script run. It will prompt you for an input. Page 37 We have successfully written to a text file saved as storage.txt. It will be saved in the current working directory of the Python interpreter. You should be able to see this file as a basic text document: Back in the Python interpreter, chose 3 to read back the file contents: You have successfully run your first Python script! Enter a value of 4 into the interpreter to close the script. Feel free to play around with the script.py. Page 38 The following examples are shown to demonstrate some of the built in power of the python language. The commands are meant to be run directly from a terminal, not from the python interpreter. When using the python command, we can pass the -m option to execute a module as a script. Page 39 Page 40 Page 41 Page 42 Page 43 Page 44 Page 45 Page 46 Page 47
https://ru.scribd.com/document/271098084/Python-Manual
CC-MAIN-2019-26
refinedweb
4,032
73.68
1. I have an interactive animation in Flash SWF which is imported to my document. 2. This SWF have a button with the following code: import flash.external.ExternalInterface /* Stop at This Frame */ stop(); btnPart1Instance.addEventListener(MouseEvent.CLICK, fl_MouseClickHandler); function fl_MouseClickHandler(event:MouseEvent):void { var retn1:Object=ExternalInterface.call( "eval", "this.gotoNamedDest('G10.1013694')"); var retn2:Object=ExternalInterface.call( "eval", "this.delay = true"); // Trying to force page to refresh } 3. This code allows me to jump to the page I wish; however, the rectangle in which the Flash SWF shows in front of the page. 4. I wish to force refresh the page so this rectagle disappear by using Javascript. What is your opinion? How can I make this work? Retrieving data ...
https://forums.adobe.com/thread/1028818
CC-MAIN-2017-51
refinedweb
122
54.18
Recently I had to determine, given a specific date, what the recurrence of that date’s day of the week was within the month; for example, today (the 20th of March 2009) is the 3rd Friday this month. In addition, I also had to determine whether it was also the last recurrence of the day within the month, for example if the date was the 27th of March, 2009 then it would be the fourth Friday in March, but also the last Friday in March.. see what I mean? To support this, I had to do a little design work to find a solution which would be fairly quick and compact. It obviously had to take into account leap years, more than four occurrences of a day within a month (the max is five), as well ad months consisting of 28, 29, 30 and 31 days in length and – importantly – what day (of the week) the month started on! The trick here was to use the [the .Net Framework’s] DayOfWeek enum in concert with DateTime’s handy .DaysInMonth() function. The DayOfMonth assigns a numeric value to each day, and the week starts with a Sunday, as below. public enum DayOfWeek { Sunday = 0, Monday = 1, Tuesday = 2, Wednesday = 3, Thursday = 4, Friday = 5, Saturday = 6, } Simply put, a month could start on any day of the week (obviously), so we have to deal with our edge cases – weekends – where the day Sunday (DayOfWeek == 0) or Saturday (DayOFWeek == 6). Taking this into consideration, I wagered that by working out the number of days in a month in addition to the day on which the month started (relative to the day requested), we could reasonably determine the occurrence of the specified date within the month, e.g. the 15th of March (Sunday) relative to the 1st of March (also a Sunday) – are you with me so far? Should a month not start on a weekend day or a day which comes after the requested day (e.g. 
requested date is a Tuesday, but month starts on a Thursday) then we have to add an extra recurrence (because there would be an extra occurrence within the first week (7 days) of the month) – does that make sense? Here’s a complete sample written in C#. Apologies for the use of an out param. using System; using Microsoft.VisualStudio.TestTools.UnitTesting; namespace SampleProject { class Program { static void Main(string[] args) { bool isLast = false; Assert.IsTrue(GetRecurrence(2009, 3, 29, out isLast) == RecurrenceEnum.Fifth, "Should be fifth (and last) Sunday in March"); Assert.IsTrue(isLast, "Should be last day in month"); } /// <summary> /// Represents the possible recurrence of a day within a month /// </summary> public enum RecurrenceEnum { First = 1, Second = 2, Third = 3, Fourth = 4, Fifth = 5 } /// <summary> /// Obtain the occurrence of a day within a month, e.g. First Saturday or Fifth (and last) Friday /// </summary> /// <param name="year">Year of date to check</param> /// <param name="month">Month of date to check</param> /// <param name="day">Day of date to check</param> /// <param name="isLast">Whether this is also the last occurrence within the month</param> /// <returns>the occurence of the day within the month</returns> public static RecurrenceEnum GetRecurrence(int year, int month, int day, out bool isLast) { isLast = false; //set the out param, defaulted to false int numberOfDaysInMonth = DateTime.DaysInMonth(year, month); int firstDayOfMonth = (int)new DateTime(year, month, 1).DayOfWeek; int checkDayOfWeek = (int)new DateTime(year, month, day).DayOfWeek; int lastWeekInMonth = (numberOfDaysInMonth / 7); int checkWeekInMonth = (day / 7); //not directly divisible (i.e. not Feb (28 days)) if (numberOfDaysInMonth % 7 != 0) { lastWeekInMonth += 1; } //day (date) not directly divisible by 7 (i.e. not 7th, 14th etc), so add 1 if (day % 7 != 0) { checkWeekInMonth += 1; } //is this the last week in the month? 
if (lastWeekInMonth == checkWeekInMonth) { isLast = true; } //if the month started with a weekend day or a day which comes before the check date then //we just return the week (i.e. second week == second recurrence) if (firstDayOfMonth == 0 || firstDayOfMonth == 6 || firstDayOfMonth <= checkDayOfWeek) { return (RecurrenceEnum)checkWeekInMonth; } //else it means we have an extra recurrence before the day requested, so add one recurrence else { return (RecurrenceEnum)checkWeekInMonth + 1; } } } }
https://sanderstechnology.net/2009/find-the-occurrence-of-a-day-within-a-month-for-a-given-date/9892/
CC-MAIN-2021-10
refinedweb
696
57.91
On Sun, Apr 12, 2009 at 01:32:54PM +0200, Jan Blunck wrote:> Am 11.04.2009 um 19:49 schrieb "Paul E. McKenney" > <paulmck@linux.vnet.ibm.com>:>>> On Fri, Apr 10, 2009 at 06:13:57PM +0200, Jan Blunck wrote:>>> I think it is wrong to unconditionally take the lock before calling>>> atomic_dec_and_test() in _atomic_dec_and_lock(). This will deadlock in>>> situation where it is known that the counter will not reach zero (e.g. >>> holding>>> another reference to the same object) but the lock is already taken.>>>> The thought of calling _atomic_dec_and_lock() when you already hold the>> lock really really scares me.>>>> Could you please give an example where you need to do this?>>>> There is a part of the union mount patches that needs to do a union_put() > (which itself includes a path_put() that uses atomic_dec_and_lock() in > mntput() ). Since it is changing the namespace I need to hold the vfsmount > lock. I know that the mnt's count > 1 since it is a parent of the mnt I'm > changing in the mount tree. I could possibly delay the union_put().>> In general this let's atomic_dec_and_lock() behave similar on SMP and UP. > Remember that this already works with CONFIG_SMP as before Nick's patch.I asked, I guess. ;-)There is some sort of common code path, so that you cannot simply callatomic_dec() when holding the lock? Thanx, Paul>>>>> Signed-off-by: Jan Blunck <jblunck@suse.de>>>> --->>> lib/dec_and_lock.c | 3 +-->>> 1 files changed, 1 insertions(+), 2 deletions(-)>>>>>> diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c>>> index a65c314..e73822a 100644>>> --- a/lib/dec_and_lock.c>>> +++ b/lib/dec_and_lock.c>>> @@ -19,11 +19,10 @@>>> */>>> int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)>>> {>>> -#ifdef CONFIG_SMP>>> /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */>>> if (atomic_add_unless(atomic, -1, 1))>>> return 0;>>> -#endif>>> +>>> /* Otherwise do it the slow way */>>> spin_lock(lock);>>> if (atomic_dec_and_test(atomic))>>> -- >>> 1.6.0.2>>>
http://lkml.org/lkml/2009/4/13/19
CC-MAIN-2014-49
refinedweb
320
75.1
Subject: Re: [OMPI devel] System V Shared Memory for Open MPI: Request forCommunity Input and Testing From: Paul H. Hargrove (PHHargrove_at_[hidden]) Date: 2010-06-10 04:43:54 Sylvain Jeaugey wrote: > On Wed, 9 Jun 2010, Jeff Squyres wrote: > >>)... > I'm sorry, but I think System V has many disadvantages over mmap. > > 1. As discussed before, cleaning is not as easy as for a file. It is a > good thing to remove the shm segment after creation, but since > problems often happen during shmget/shmat, there's still a high risk > of letting things behind. > > 2. There are limits in the kernel you need to grow (kernel.shmall, > kernel.shmmax). On most linux distribution, shmmax is 32MB, which does > not permit the sysv mechanism to work. Mmapped files are unlimited. > > 3. Each shm segment is identified by a 32 bit integer. This namespace > is small (and non-intuitive, as opposed to a file name), and the > probability for a collision is not null, especially when you start > creating multiple shared memory segments (for collectives, one-sided > operations, ...). > > So, I'm a bit reluctant to work with System V mechanisms again. I > don't think there is a *real* reason for System V to be faster than > mmap, since it should just be memory. I'd rather find out why mmap is > slower. > > Sylvain > _______________________________________________ > devel mailing list > devel_at_[hidden] >. -Paul -- Paul H. Hargrove PHHargrove_at_[hidden] Future Technologies Group HPC Research Department Tel: +1-510-495-2352 Lawrence Berkeley National Laboratory Fax: +1-510-486-6900
http://www.open-mpi.org/community/lists/devel/2010/06/8089.php
CC-MAIN-2014-41
refinedweb
256
65.12
class in UnityEngine.ScriptingCambiar al Manual PreserveAttribute prevents byte code stripping from removing a class, method, field, or property. When you create a build, Unity will try to strip unused code from your project. This is great to get small builds. However, sometimes you want some code to not be stripped, even if it looks like it is not used. This can happen for instance if you use reflection to call a method, or instantiate an object of a certain class. You can apply the [Preserve] attribute to classes, methods, fields and properties. In addition to using PreserveAttribute, you can also use the traditional method of a link.xml file to tell the linker to not remove things. PreserveAttribute and link.xml work for both the Mono and IL2CPP scripting backends. using UnityEngine; using System.Collections; using System.Reflection; using UnityEngine.Scripting; public class NewBehaviourScript : MonoBehaviour { void Start() { ReflectionExample.InvokeBoinkByReflection(); } } public class ReflectionExample { static public void InvokeBoinkByReflection() { typeof(ReflectionExample).GetMethod("Boink", BindingFlags.NonPublic | BindingFlags.Static).Invoke(null, null); } // No other code directly references the Boink method, so when when stripping is enabled, // it will be removed unless the [Preserve] attribute is applied. [Preserve] static void Boink() { Debug.Log("Boink"); } } For 3rd party libraries that do not want to take on a dependency on UnityEngine.dll, it is also possible to define their own PreserveAttribute. The code stripper will respect that too, and it will consider any attribute with the exact name "PreserveAtribute" as a reason not to strip the thing it is applied on, regardless of the namespace or assembly of the attribute.
https://docs.unity3d.com/es/2019.1/ScriptReference/Scripting.PreserveAttribute.html
CC-MAIN-2021-25
refinedweb
263
59.6
NAME sigreturn - return from signal LIBRARY Standard C Library (libc, -lc) SYNOPSIS #include <signal.h> int sigreturn(const ucontext_t *scp); DESCRIPTION The sigreturn() system call allows users to atomically unmask, switch stacks, and return from a signal context. The processes signal mask and stack status are restored from the context structure pointed to by scp. The system call does not return; the users stack pointer, frame pointer, argument pointer, and processor status longword are restored from the context. Execution resumes at the specified pc. This system call is used by the trampoline code and longjmp(3) when returning from a signal to the previously executing program. NOTES This system call is not available in 4.2BSD hence it should not be used if backward compatibility is needed. RETURN VALUES If successful, the system call does not return. Otherwise, a value of -1 is returned and errno is set to indicate the error. ERRORS The sigreturn() system call will fail and the process context will remain unchanged if one of the following occurs. [EFAULT] The scp argument points to memory that is not a valid part of the process address space. [EINVAL] The process status longword is invalid or would improperly raise the privilege level of the process. SEE ALSO sigvec(2), setjmp(3), ucontext(3) HISTORY The sigreturn() system call appeared in 4.3BSD.
http://manpages.ubuntu.com/manpages/jaunty/man2/sigreturn.2freebsd.html
CC-MAIN-2014-42
refinedweb
224
55.84
Archived talk:Main Page 20110903 Forum For Discussion Does anyone else think that Wiki Discussion pages are an absolutely terrible way to communicate? It's fine for individual page feedback, but has anyone considered hosting a normal chat forum for Robocode? Skotty 00:08, 26 May 2011 (UTC) I don't think they're terrible, but I am curious to check out something like LiquidThreads, which might be a good fit with the large role that discussion has on this wiki/in this community. --Voidious 02:09, 26 May 2011 (UTC) IMO, Wiki Discussion is very good, because it allow very easy comment structuring, and very flexible way of communicating. I personally think that User_talk page should be use for thread not related to other topics. The only bad thing IMO is that is is not newbie-friendly (more like newbie-enemy). --Nat Pavasant 06:02, 26 May 2011 (UTC) Why is the "Robo Wiki" icon in the upper left corner so small? The headline speaks for itself. --Awesomeness 21:08, 3 May 2009 (UTC) So small? It seems just right to me really. --Rednaxela 21:56, 3 May 2009 (UTC) Because it is the same size as the old wiki =) » Nat | Talk » 23:41, 3 May 2009 (UTC) I have a solution to this! I was bored so I drew this (though it is probably too big!). I realize I forgot the radar, but to be honest, from this angle it would block to much however. Just consider it a drone! --Chase 21:50, 20 June 2010 (UTC) - You could just said that it is a Droid instead of saying that you forgot them... --Nat Pavasant 13:36, 21 June 2010 (UTC) - Looks really nice! I like it. Of course I have a soft spot for the old one, but this one's also very true to the original idea. And the text is way more readable. --Voidious 00:05, 22 June 2010 (UTC) I fixed some of my shadow work in this. I made the wording a bit more visible as well (by adding more depth to it). I also made the book lines heavier. — Chase-san 06:25, 20 July 2010 (UTC) Obviously this is to big to go in the corner, but I made a reduced size one. 
— Chase-san 06:28, 20 July 2010 (UTC) Was a decision ever reached on this, because I am considering redrawing this (but better). — Chase-san 20:13, 18 January 2011 (UTC) I'm up for changing it and I don't think anybody objected. Just fell off my radar - sorry! I think my only criticism of that first one is that the "IKI" is a little smushed together. --Voidious 20:20, 18 January 2011 (UTC) Well (I personally think) my art has improved considerably since I drew this. — Chase-san 21:31, 18 January 2011 (UTC) - If nothing else, I will go over it with a pen and clean up the line work. — Chase-san 21:38, 18 January 2011 (UTC) Here is the new version. It's a bit cleaner, though it doesn't have as much character (because it is so much cleaner). — Chase-san 02:01, 19 January 2011 (UTC) Nice stuff, looks pretty good to me :) --Rednaxela 06:04, 19 January 2011 (UTC) So are we going to use it? — Chase-san 16:12, 8 February 2011 (UTC) Just updated it. I think it looks really nice, it really changes the look of the page a lot more than I expected. Let me know if anyone sees it looking weird anywhere - I'm not exactly a CSS guru. :-P Nice job, Chase! --Voidious 17:40, 8 February 2011 (UTC) - You just change the $wgLogo, right? I don't think it would affect the page at all. I does feel weird at first, I agree. --Nat Pavasant 15:18, 9 February 2011 (UTC) What about the favicon? --Nat Pavasant 10:59, 11 February 2011 (UTC) Does robo rumble work? hi im new to robocode, been doing it as part of my uni course and i was wondering, is it possible to run roborumble at home anymore because ive follwed the instructions and none of the battles ive been doing have been uploading.... Quietus 15:47, 21 November 2008 (UTC) Yep! It sure as he heck still works! The thing is, the rumble.fervir.com server has been kind of broken for a while. You need to point your client at this URL instead now (See here for more information). 
There's new fanciness in that server too :) --Rednaxela 19:48, 21 November 2008 (UTC) Article count A couple questions about the article count (after having some trouble Googling for answers). First, why isn't it updating automatically? Is that something I can trigger to update, or add to the "job queue", does anyone know? (Notice if you edit / preview it is higher than 43, which it reads on the main page right now.) Second, what qualifies as an "article"? Is there a minimum length that a page needs to be (other than not being a user or talk page) to qualify as an article? --Voidious 19:56, 12 November 2007 (UTC) - The main page just updated, it now says 48 articles. Also, the statistics page says that there are 145 pages total, but it is excluding, "talk pages, pages about RoboWiki, minimal 'stub' pages, redirects, and others that probably don't qualify as content pages." --AaronR 20:00, 12 November 2007 (UTC) According to the MediaWiki wiki (now that's a mouthful), the main page will come up to date as soon as its HTML cache is invalidated, at which point all of the templates, etc. will be transcluded again. Don't know if that helps... --AaronR 20:27, 12 November 2007 (UTC) Font size Just out of curiosity, why is the font size so large here compared to, say, Wikipedia? I know, I know, it's the same as the old wiki's font, but that wiki didn't have a sidebar. --AaronR 07:04, 13 November 2007 (UTC) - Primarily because I thought "x-small" was just too small, and yes, it was also just sooo much smaller than the old wiki. I also figured that with the skins options, we could easily give people more choices to choose their own style, anyway. I will confess that tiny fonts for the sake of sleeker designs is a major pet peeve of mine. :-P --Voidious 07:12, 13 November 2007 (UTC) Smileys :-) I would be relly nice if we could somehow support smile, e.g. just as simple as stating: [[Image:HappySmiley.png]] --Flemming N. 
Larsen 09:04, 28 November 2007 (UTC) Wikimedia Commons has a whole section of GFDL'd or public domain smileys (look at the link at the bottom for more). I don't really see the point though. If you want to upload them and use them, feel free, but I'll stick with =) on the wiki. -- AaronR 01:21, 30 November 2007 (UTC) Old wiki The old wiki is not working (error 500), and so my roborumble client (it cannot find the list of partecipants), but i see many client is uploading. What is your solution? --Lestofante 10:35, 1 Dic 2008 (UTC) The clients currently running are just using the last copy of the participants list they downloaded from the old wiki. I emailed PEZ a bit ago and got a reply that he'll look into it so hopefully the old wiki will be up again in not too long. If that ends up taking longer than expected though, we could update/fix the participants page on the new wiki and point clients at that instead. (Note: Don't point clients at the one on the new wiki just yet, it's out of date and such. It needs to be copied over from a downloaded copy of the list from the old wiki) --Rednaxela 12:00, 1 December 2008 (UTC) maybe we can drop definitely the old wiki for roborumble client, or better every ramble server can implements it's own participants list, maybe integrated with general wiki list --Lestofante 15:13, 1 Dic 2008 (UTC) I'd agree that migrating to using the new wiki participants list would be good, though I think some more veteran wikiers/rumblers than myself should give their input before any such switch is made 'official'. As far as keeping a participants list with the rumble server, well, there's only one working rumble server at the moment so I'm not sure what good that would do, and furthermore multiple rumble server would be a bad thing I think becacuse it would divide the processing power that goes towards making the battles. 
--Rednaxela 17:47, 1 December 2008 (UTC) That's right, then I thing the server have to had a mirror of the wiki's official participants list, so in this case we can continue run our client, simple we cannot modify the list (if the two list are not synchronize together) --Lestofante 21:49, 1 Dic 2008 (UTC) Yeah, the real problem is getting the participants list from the currently-erroring wiki. The RR client has no problem parsing the new wiki format. I may be able to get that from the RoboWiki server when I get home (if it isn't fixed before then). The other issue we might encounter in the future would be when we move this wiki to robowiki.net, and we have RR clients pointing to testwiki.roborumble.org, but I don't think forwarding that URL and/or having people update their clients would be a big issue. I'm glad you contacted PEZ about the old wiki's current problems, I've been out of touch for at least a week... --Voidious 19:03, 2 December 2008 (UTC) Okay to mention it here as well for people who haven't been checking up on it... The old wiki is back up! --Rednaxela 18:43, 9 December 2008 (UTC) mailer error I'm trying to confirm my user e-mail, but I still get "mailer error". I've tried 3 different and working address... and there is a way for automatically sign the edits? --Lestofante 22:00, 1 Dic 2008 (UTC) - I couldn't get the mailer to work either, but you can easily sign your edits using --~~~~. See for more tricks. --Darkcanuck 03:13, 2 December 2008 (UTC) InterWiki Links Who have fully access to this server apart from David Alves? I want both my thai wiki and this wiki a inter-languages link. Please look here for more detailed. » Nat | Talk » 15:16, 15 April 2009 (UTC) I have full access, or at least I do if I can remember the password. =) I'll see about logging in and taking a look at the InterWiki stuff. Sorry it took so long to respond about this. 
--Voidious 14:50, 16 April 2009 (UTC) Hey Voidious, I believed that the SQL will take less than 1 seconds to copy/past/execute. Plus login/connecting time I think this can be accomplished within a minute so please do asap (or I must say NOW). » Nat | Talk » 16:19, 22 April 2009 (UTC) Hey Nat - I will add the InterWiki link because I'd like to support your efforts to start a Thai Robocode community and wiki. But I really don't appreciate being commanded to do that (or much of anything, really), and especially to do so "NOW". Please keep in mind the RoboWiki's only rule: "Pretty please be polite." --Voidious 18:48, 22 April 2009 (UTC) Cool, I read up on the InterWiki stuff, added your Thai wiki to the database, and posted a link on the main page. As I can't read Thai, please make sure that looks right and edit it if necessary. =) --Voidious 22:54, 22 April 2009 (UTC) Sorry for that rude. But anyways, thank you very much. The link at main page is correct. » Nat | Talk » 01:37, 23 April 2009 (UTC) Database error I didn't know where to put this, but here seems like a decent spot. All my posts are getting this message today (4 times, probably 5 with this one), the posts are still uploaded but the message is: A database query syntax error has occurred. This may indicate a bug in the software. The last attempted database query was: (SQL query hidden) from within function "SearchMySQL4::update". MySQL returned error "126: Incorrect key file for table './wikidb/searchindex.MYI'; try to repair it (localhost)". I don't know if localhost is meant to be from mine point of view, or the server's. Maybe someone else is getting the same messages, I haven't tried to logout/in, because I just thought of that. --zyx 23:47, 5 May 2009 (UTC) - Just like when you go to the mechanic, I didn't get the message this time. Another piece of information may be that all the other 4 posts where at Talk:PwnBot, so maybe the problem relies in that page. 
--zyx 23:49, 5 May 2009 (UTC) - Just got it again, and again in the same page Talk:PwnBot. --zyx 20:45, 6 May 2009 (UTC) - Is it... My fault?! I created that page. It seems whenever I change a page I get it too. It doesn't matter what page I go to. Also, for the majority of today, (for me at least) it seems your server has been down. I've been unable to connect. --Awesomeness 22:04, 6 May 2009 (UTC) SourceForge.net Community Choices Awards Please nominate Robocode to SourceForge.net Community Choices Awards! Nominate Robocode Anyone mind to put this to MediaWiki:Sitenotice or front page? I not sure if I can post to front page. » Nat | Talk » 10:21, 15 May 2009 (UTC) Please nominate Robocode in category of "Best Project for Academia" and "Most Likely to Change the Way You Do Everything". If you nominate to another category, please post here so other robocoder can nominate in same categories (note that you can nominate in multiple categories) Nevertheless, please nominate! » Nat | Talk » 11:26, 18 May 2009 (UTC) Special:Disambiguations Do anyone know why the Special:Disambiguations has spammy report? » Nat | Talk » 11:23, 18 May 2009 (UTC) Image Uploads It looks like the image uploads folder is not set as writable to the wiki. Is this expected to change? -- Synapse 05:27, 10 June 2009 (UTC) Haven't test yet. If this is true, I think this problem cause due the upgrading of MediaWiki. Voidious, check it please? Another note to Voidious, MediaWiki/1.15 just release =) » Nat | Talk » 13:29, 10 June 2009 (UTC) Not sure why it stopped working, but I'll take a look at the image uploads and upgrading MediaWiki this afternoon. (And making a mental note to test that whenever I upgrade. =)) --Voidious 15:41, 11 June 2009 (UTC) Fixed the uploading issue and upgraded MedaWiki to 1.15. Enjoy. =) --Voidious 20:10, 11 June 2009 (UTC) /wiki/PageName-style url Recently this day I noticed that the /wiki/PageName-style URL is now work. 
Voidious, why don't you set the $wgArticlePath = "/wiki/$1" in LocalSettings.php? And I wonder why the old server at 174.132.4.195 now has new wiki code, and available in both /wiki/PageName and ?PageName style (but not /w/index.php?title=PageName style). What's going on? » Nat | Talk » 12:56, 27 June 2009 (UTC) Syntax Highlighting It would be great if we could have syntax highlighting for code snippets. This would make code snippets easier to read. Currently, if I want to read a code snippet from the wiki, I would copy and paste it into my favourite text editor. Wikipedia itself seems to use SyntaxHighlight GeSHi.— Duyn 14:56, 17 January 2010 (UTC) User:Voidious/RoboWiki_To-Do --Nat Pavasant 13:41, 18 January 2010 (UTC) Yeah, that's been on my to-do list for a while, I'll try to get it up and running soon. Can't be too tough. =) --Voidious 16:12, 19 January 2010 (UTC) Favicon I just noticed that the new Robowiki doesn't have a favicon. This is easily remedied by copying so that it is accessible at --Skilgannon 18:00, 17 January 2010 (UTC) Robocode on Wikipedia Well, wikipedia:Robocode article is just challenged for reference, as well as a original research. Please help it by adding reference. --Nat Pavasant 15:09, 15 April 2010 (UTC) Honestly, I believe many things on the wikipedia article, at least 2/3rds of the page, just plain don't belong on Wikiedia, due to valid reasons. In fact I'm not sure anything beyond the overview and the first two sections belong, given how Wikipedia's 'original research' and 'notability' criteria apply to such sections. Even those sections which do make sense to keep to desperately need citations. Essentially, I don't feel the sections added by PEZ fit wikipedia's criteria for what belongs. So... I think it does deserve to be challenged for good reason. I don't have time to improve what should stay though. 
--Rednaxela 16:39, 15 April 2010 (UTC) I'll make more comments on the Wikipedia talk page, but I agree it needs a lot of cleanup and I'm willing to help out with that. I half agree with Rednaxela. The overview and first 2 sections are fine and neutral. The rest could use a lot of cleaning up, but I don't think it all needs just to be axed. Some editing down / revising / adding citations should do the trick. (I think the RoboWiki is a "reliable source"? If not, I don't know what is.) --Voidious 19:03, 15 April 2010 (UTC) - Well, while I would personally consider Robowiki a "reliable source" for my own purposes, it seems to me it violates some of what is noted on Wikipedia:Verifiability#Self-published_sources, in particular how it notes "open wikis" among other things as "largely not acceptable". Also, note the "no original research" policies, and consider that much of the purpose of Robowiki is for "original research". There is a significant difference between a source one trusts, and a source that fits Wikipedia's rules. --Rednaxela 01:54, 16 April 2010 (UTC) - Hmm, thanks. I really don't know much about Wikipedia's policies, but just starting to read up on it now. In that case, you are probably right that a lot of stuff needs to just be axed... --Voidious 13:46, 16 April 2010 (UTC) "Robocode Guidebook" Migrated to Talk:Robocode Guidebook RoboCode Chat / IM So it is to my understanding that currently the only real way for robocoders to talk is on talk pages on this wiki so I had this idea... what if we had an IM type web chat system for Robocoders to talk :) within a few months i could probably get a website set up to do that and it would use the open-source AJAX project. What do you all think of the idea? --Exauge 00:33, 15 May 2010 (UTC) I think we have an (deserted) IRC channel (or not?). But actually I do chat with some Robocoder through GTalk and e-mail. --Nat Pavasant 00:38, 15 May 2010 (UTC) Seems like a lot of work. =) Some of us have each other on IM. 
I use my handle on AIM and my handle at gmail for GTalk/Jabber. (But my AIM is set so you can't see my status if I don't have you as a buddy.) We used to have oldwiki:ContactInfo, but there's no current page with that kind of info. --Voidious 00:41, 15 May 2010 (UTC) Hi, anyways I've started working on a Robocode talk page and it's getting to be pretty sweet ;) I can probably have it finished in a few weeks (maybe sooner) and anyways if you like it you will all be very welcome to use it and if it's popular enough I might purchase a domain name for it :) and if not, well it's a good way for me to practice my website building skills :) anyways back to studying for tomorrow's exams lol :S --Exauge Well, personally anyway, if were to have a robocode live chat place, I'd strongly prefer it be an IRC channel (i.e. on Freenode?). Mostly because I always have an IRC client running anyway, and those without an IRC client can still sign in via a web page (i.e. [1] and [2]). Also, IRC is probably about the most mature system for such things anyway, and it's nearly maintain free to maintain a channel. --Rednaxela 04:03, 17 May 2010 (UTC) Wall Avoidance moved to Talk:DustBunny... Walking to the center of the wall Well, i'm trying to make a Robot that goes to one of the walls, walk to the center of it an then start to circulate the entire Field, touching the center of each wall. Anyway, I'm having problems to take the Robot to the center of one of the walls. 
That's what i'm doing: package MyRobots; import robocode.*; import java.awt.*; public class Walle extends AdvancedRobot { double moveAmount; int miss = 0, hit = 0; public void run() { moveAmount = Math.max(getBattleFieldWidth(), getBattleFieldHeight()); setBodyColor(Color.black); setGunColor(Color.orange); setRadarColor(Color.yellow); setScanColor(Color.black); setBulletColor(Color.orange); turnLeft(getHeading() % 90); ahead(moveAmount); turnGunRight(90); turnRight(90); if((getY() + getWidth()/2) == getBattleFieldHeight() || (getY() - getWidth()/2) == 0) ahead(getX() - (getBattleFieldWidth()/2)); if((getX() + getHeight()/2) == getBattleFieldWidth() || (getX() - getHeight()/2) == 0) ahead(getY() - (getBattleFieldHeight()/2)); while(true) { } } The problem is obviously with the 2 'ifs', but I really can't get it. Thanks for any help! getX() & getY() methods returns double value and you cannot compare them using ==, use something like this epsilon = 0.0001; // or another small value if (Math.abs((getY() + getWidth()/2) - getBattleFieldHeight()) < eplsilon) { ahead(getX() - (getBattleFieldWidth()/2)); } // or if (robocode.util.Utils.isNear(getY() + getWidth()/2, getBattleFieldHeight())) { ahead(getX() - (getBattleFieldWidth()/2)); } And it's better to ask questions like this on yours page --Jdev 16:21, 14 September 2010 (UTC) Strategy So I'm in high school and I'm in a first year Java class. Right now our project is to create a robot in robocode. The rules are the each battle will be fought with 3 to 4 players, the final battle will be 1 on 1, and we aren't allowed to use Advance Robot. There's so many opions in Robocode that I have no clue what's the best strategy going into this. Help!!! Thanks. The class JuniorRobot might work well for what you are doing. For your robot just make a class extending JuniorRobot. It could be helpful to override onScannedRobot() and perform actions in there. 
Also be sure to check out the Radar page and try to implement one of those radar locks because without a decent radar system your robot will be seriously limited. Just spinning the radar might work OK though. ex turnRadarRight(Double.POSITIVE_INFINITY); Also be sure to check out all the tutorials on this site. --Exauge ◊ talk 02:56, 15 December 2010 (UTC) Well... don't get hit. :-) My advice to get started would be: - Check out the sample bots first, learn how and why they work. - Try to beat all the sample bots. - I'd also check with your teacher about using code from the RoboWiki / internet. There's plenty of code and tutorials available, but it's up to your teacher if you're allowed to use that code (even if the code's license says it's ok). - A page like Melee Strategy should be good to get the strategy gears turning. Good luck! --Voidious 03:33, 15 December 2010 (UTC) Question Forum Is there a question forum or something of the sort where people can ask random questions? —Preceding unsigned comment added by Starhammy (talk • contribs) No, but actually you can ask on your talk page. (I forgot to welcome you properly, so it's my bad you don't know where to ask) --Nat Pavasant 12:53, 29 December 2010 (UTC) I got an email from some website (itbhu.ac.in). Apparently an Indian college is hosting a "CodeFest" tournament for about 730 something USD (35000 Indian Rupees, it says), and it's an extension of Robocode; it has different modes including Capture the Flag, and it adds walls to the battlefield itself. My main concern: is it safe to download, ie. it has no viruses or trojan horses whatsoever? --AWOL 20:34, 31 January 2011 (UTC) - I don't know, but the first prize is only 15000, e.g. about 300 USD. The rest is for second, runner up, stuff like that. — Chase-san 23:05, 31 January 2011 (UTC) - I'm rather confident it's safe. It is hosted at a well reputed Indian university, and the event seems to have some notable partners. I'll point this out though... 
it appears to be using a build of the "custom-battlefields-workspace" branch of Robocode, which hasn't kept up to date with the main Robocode changes since version 1.7.1.4. This means all known bugs in Robocode 1.7.1.4 likely apply, and some of them are rather... serious... for a competition (i.e. a teleport bug, and a kill-other-robot-threads bug) --Rednaxela 00:20, 1 February 2011 (UTC) - I've been playing around with it for the last couple days and it's seems to be just a different version of Robocode. Totally safe so far. Like Rednaxela said, it's just a reworked copy of the custom battlefield branch. I will say this, it's been a lot of fun working on how to detect objects in the battlefield... --KID 20:58, 3 February 2011 (UTC) I've been thinking the Main Page needs a serious overhaul. I'll probably experiment at User:Voidious/Main Page soon, and anyone else is of course free to do the same. Some of my thoughts apply to the wiki more generally, as well. - Rednaxela commented to someone recently about how discussion-based this wiki community is. We've also been looking at LiquidThreads because of that. We should probably point this out. I know I never visit Wikipedia's Recent Changes... I wonder how many people miss out on the community aspect of the RoboWiki? - Current events stuff needs to be removed or updated. - I don't think we need to list every Challenge. Maybe 2-3 of the recent ones and then links to the categories (which themselves could be polished a bit). - I think the "Building a Bot" section is really important. It's not bad right now, but could be better. We should probably link to more of the Robocode docs stuff. Radar is the 2nd link and goes to a very detailed page, but Targeting and Movement are not as newbie friendly. - Would be good to link to specific details about development tools - like Utilities, RoboResearch, Robocode/Eclipse. - I think the layout and colors could also be revamped - especially that gray background is pretty drab. 
- Maybe more prominent and detailed info on the RoboRumble. - Is the @roborumble Twitter feed interesting enough to include? (Like the @robowiki tweets on the sidebar.) I'm sure there's plenty more we could do, just some quick brainstorming for now... --Voidious 19:01, 10 February 2011 (UTC) Another thing I think we need to note (especially to someone who is Wikipedia's editor) is that the RoboWiki is subpage-based, like we like Robocode/Getting_Start and not Getting_Start_to_Robocode. Just my two cents. --Nat Pavasant 10:59, 11 February 2011 (UTC) About the spam... - See bottom of page for spamlists. - Has our old math captcha type thing in it. - No idea how this is suppose to work That is all. :) — Chase-san 16:39, 12 March 2011 (UTC) We already use ConfirmEdit in reCAPTCHA mode. I checked SpamBlacklist and it's blacklists don't match anything in the recent spam. Unsure about Bad_Behavior. Now, on the ConfirmEdit page, it does note that as of 2011 spammers seem to have gotten around reCAPTCHA by some means. --Rednaxela 18:18, 12 March 2011 (UTC) I think the trick is to do something custom so that the usual anti-antispam techniques don't work. Just something simple like "What is the next prime number after 7?" or so. --Skilgannon 08:40, 13 March 2011 (UTC) Maybe the spammer wait for autoconfirm to bypass the reCAPTCHA. Voidious, can we change the autoconfirm rules to four edits and ten days? And enable reCAPTCHA for every page edit in non-autoconfirm group? --Nat Pavasant 01:41, 14 March 2011 (UTC) Ah, good call! I forgot about the autoconfirm stuff. I just changed it to 4 edits/10 days. I checked and we only skip the captcha if you're autoconfirmed, bot, or sysop, so I think we're good. Hopefully we'll be seeing less spam now... 
--Voidious 01:56, 14 March 2011 (UTC) Since the spam was still coming in, I updated mediawiki, update the version of the ConfirmEdit extension, AND did some custom hacks to the ConfirmEdit extension to make it simultaneously require both reCAPTCHA and a LaTeX-rendered arithmetic question. Hopefully the combination of the two will keep this spam out for good :) --Rednaxela 06:06, 19 March 2011 (UTC) I thought Voidious still keeping it up to date..... Well, Red, you have checked permission of upload folder, right? Every time Void upgrade the MediaWiki, he needs to re-set the permission of upload folder. Good thought with Recent Change cleanup, though. If the spam doesn't stop (as I believe many spambot can solve simple math image, I've face one), maybe you could have them solve equation ;-). --Nat Pavasant 07:54, 19 March 2011 (UTC) Well, it wasn't *very* behind in versions, but was still 1.15.x as opposed to 1.16.x. Yep, I made sure to check the permissions for that I believe. Yeah, I wouldn't be surprised if some spambots can solve ConfirmEdit's MathCaptcha as-is, but I'm doubting that many spambots are designed to handle both that and reCAPTCHA on the same page. If they do however, I'll keep that suggestion in mind :) --Rednaxela 13:57, 19 March 2011 (UTC) I took a look at the HTTP logs and the recent spambots have been jumping IP addresses, never solving the reCAPTCHA from the same IP that they requested it via. As a precaution, I further tweaked the captcha code to only accept answers as valid if they're from the same IP that was given that captcha. --Rednaxela 15:34, 19 March 2011 (UTC) It seems like we're still getting a high number of users with odd names... might I suggest doing something custom? Doesn't have to be complicated, just "the next prime number after X" or "the next number in the sequence 3 6 9 ..." or some such. The trick is to make it not worth the time and effort for the spammer to implement a custom cracker just for robowiki. 
--Skilgannon 13:04, 21 March 2011 (UTC) I've looked at the http logs for the IP addresses of those recent users, and they appear to be real people. They browsed some pages, made an account for some reason, browsed some more pages. Btw, an interesting stat: On March 21st so far (based on the server's clock), there have been 76 failed signup attempts due to captcha, and none of them browse pages before the signup attempt. --Rednaxela 23:25, 21 March 2011 (UTC) Just seems odd to have 3 new people in 1 day =) Looks like it's stopped, nice work Red. I'm guessing the usage patterns would be a good attribute to block on if the spam ever picks up again in the future. --Skilgannon 12:16, 22 March 2011 (UTC) Glad I could help by starting a thread about the problem. I also like making custom security. One of my favorite methods is changing the URL to the php or module that does the add page/register, and making a fake captcha (hidden via CSS, putting `anything` in it means you fail the check). :P — Chase-san 15:31, 24 March 2011 (UTC) Robot Repository is down. Can't get any of the newer bots nor run the game efficiencty :) --Miked0801 16:00, 16 May 2011 (UTC) I can delete this entry when the issue is fixed... It seems that Robot Repository has been down for about a week. Been trying to upload my newest bot but its not working. I need a place to upload my bot. --Khanguy 03:08, 17 May 2011 (UTC) Well, one idea for both of you would be to using Google Sites to upload your bot; I started doing that recently and found it to be much simpler and easier than uploading bots onto the repository.--CrazyBassoonist 03:45, 17 May 2011 (UTC) The problem being, I can't get the latest bots for running the rumble nor competing against you CB :) --Miked0801 04:28, 17 May 2011 (UTC) The Robocode repository has always been sort of unreliable, hence why I've never used it personally. Anyway, perhaps look for backups of the relevant bots here or here? 
If missing ones that are avaliable in neither, list the ones that are missing? --Rednaxela 11:57, 17 May 2011 (UTC) Neither of these locations contain CrazyBassoonists latest nanos, Fuatisha 1.1 nor Caligula 1.15. I'm also pretty sure they are missing a few of the other minis that were updated within the last 6 months as well. --Miked0801 14:57, 17 May 2011 (UTC) If you go ahead and post your bot, I'll run a few battles to get some rankings. From now on none of my bots will be on the repository, but they won't make the switch until the next version.--CrazyBassoonist 20:12, 17 May 2011 (UTC) Is is possible to upload bots here on the wiki?--Khanguy 23:43, 17 May 2011 (UTC) Technically, yes. The issue we ran into is that it forces all filenames to start with upper case, while we all want the package name lower case. But we want the enforced on page titles, so we're kind of stuck. We could setup some kind of external file upload solution on the wiki server, just hasn't been a pressing issue because of other options, like Google Sites. --Voidious 00:59, 18 May 2011 (UTC) I can't post a new version of my bots because I have no way of testing against your current bots Bassoon :) And I can't really contribute without a complete set of bots for the tournament server stuff. Grrr. --Miked0801 01:25, 18 May 2011 (UTC) Hey Voidious, can you upload all the bots currently in the rumble to your dijitari site? or maybe another site. Its to help poor Miked here.--Khanguy 04:21, 18 May 2011 (UTC) Yep, here's every JAR in my RoboRumble directory, but I didn't take the time to remove multiple versions of older bots (I'm sure there are a ton). So probably want to cherry pick the ones you need. roborumble_bots.zip (69 MB) --Voidious 13:42, 18 May 2011 (UTC) Dl'ing now. 
Can't wait to see how things have changed :) --Miked0801 04:16, 19 May 2011 (UTC) I don't suppose you can make it so I do not have to fill out those two captcha boxes, unless you think I am going to spam everyone with external links ;-) — Chase-san 15:20, 9 July 2011 (UTC) I have a proposal: create new roles with skipcaptcha rights. Give this role to trusted users. This should help protect from spam and let others in peace. I know almost all long-time active accounts here are already admin with skipcaptcha rights... --Nat Pavasant 11:29, 10 July 2011 (UTC) Google Plus A number of the other robocoders and I myself are now on Google plus (thanks to Voidious and others). So if any of the rest of you feel up to it, you could join us as well. PEZ, Voidious, Skilgannon, Darkcanuck, FNL, Pavel Šavara, Nat, and a few others (probably) are there. Just ask for an invite, I am sure one of us could help out. — Chase-san 02:49, 14 July 2011 (UTC) - I've never even heard of Google Plus. Guess I should check it out if I want to maintain my computer programmer guy credentials. > Update: I just checked it out. I tried to log in, but it says they have exceeded their current capacity, and it won't let me do anything. -- Skotty 02:53, 14 July 2011 (UTC) - No idea how much like or unlike Facebook it is. Since I never used Facebook. See my latest post on the Talk:XanderCat page for a way to get your e-mail to us. — Chase-san 03:07, 14 July 2011 (UTC) - Alright, the reason that particular thing works is because bots are very simple and look for character matches, and the two things that it looks for in that thing are very common words/punctuation. So by doing that thing it breaks the bot from quickly scanning to find those things. Having to do that thing to the entire page to find just those things would be for the most case, not worth the bots time and effort as it is not often used as a way to conceal the thing we are talking about. 
— Chase-san 03:18, 14 July 2011 (UTC) - it will be cool, if someone invite me to google+:) my email: alexey dot jdev dot zhidkov at gmail dot com --Jdev 04:43, 14 July 2011 (UTC) Community website/Fan site? Is there a community website or a fan website that allows robocode fans to communicate easier (such as a forum and a portal)? Or is the wiki's talk pages it? I was wondering as Robocode's wiki is a little outdated (and a lot of resorces as well) in some areas and the Talk pages are difficult to navigate with (in my opinion). It's also difficult to see how big of an active community there is. --Ultimatebuster 21:25, 30 August 2011 (UTC) The RoboWiki is pretty much the main community site. You're right that talk pages aren't ideal - we've been looking at the LiquidThreads extensions for MediaWiki, but haven't installed it yet. You can see Special:ActiveUsers to get an idea of active community size (it fluctuates a lot over time). There's indeed a lot of older content on the wiki, but I think most of the main important articles are pretty well up to date. Hope you stick around! =) --Voidious 21:39, 30 August 2011 (UTC) EDIT CONFLICT Most of the active people (that I know of) hang around on google plus. But lacking groups you need to add everyone one by one on there (difficulty varies). Usually we watch the recent changes page for whats going on. If that seems like to much work, you can watch the robowiki twitter instead. I personally keep in contact with others via any number of messengers and so on. But if you're asking if most of us all frequent a forum, the answer as far as I know, is, no. The only semi-active group I know of is the yahoo one and that pales vs the wiki activity usually. We could probably set up forums, but it is unlikely that at least I would use them. — Chase-san 21:44, 30 August 2011 (UTC) The Yahoo robocode group isn't very active. The robocode facebook group has someone ask the occaisonal question and fnl seems to be the only actuve user. 
What I was thinking of doing was to create a robocode/robowiki app (Android for now), one that gets you the latest on the robocode community and allows you to submit questions as well as suggestions. --Khanguy 05:13, 31 August 2011 (UTC)
http://robowiki.net/wiki/Archived_talk:Main_Page_20110903
CC-MAIN-2017-26
refinedweb
6,745
71.14
1.4.1. Buildbot in 5 minutes - a user-contributed tutorial¶ (Ok, maybe 10.) Buildbot is really an excellent piece of software, however it can be a bit confusing for a newcomer (like me when I first started looking at it). Typically, at first sight it looks like a bunch of complicated concepts that make no sense and whose relationships with each other are unclear. After some time and some reread, it all slowly starts to be more and more meaningful, until you finally say “oh!” and things start to make sense. Once you get there, you realize that the documentation is great, but only if you already know what it’s about. This is what happened to me, at least. Here I’m going to (try to) explain things in a way that would have helped me more as a newcomer. The approach I’m taking is more or less the reverse of that used by the documentation, that is, I’m going to start from the components that do the actual work (the builders) and go up the chain from there up to change sources. I hope purists will forgive this unorthodoxy. Here I’m trying to clarify the concepts only, and will not go into the details of each object or property; the documentation explains those quite well. 1.4.1.1. Installation¶ I won’t cover the installation; both Buildbot master and worker are available as packages for the major distributions, and in any case the instructions in the official documentation are fine. This document will refer to Buildbot 0.8.5 which was current at the time of writing, but hopefully the concepts are not too different in other versions. All the code shown is of course python code, and has to be included in the master.cfg master configuration file. We won’t cover the basic things such as how to define the workers, project names, or other administrative information that is contained in that file; for that, again the official documentation is fine. 1.4.1.2. 
Builders: the workhorses¶ Since Buildbot is a tool whose goal is the automation of software builds, it makes sense to me to start from where we tell Buildbot how to build our software: the builder (or builders, since there can be more than one). Simply put, a builder is an element that is in charge of performing some action or sequence of actions, normally something related to building software (for example, checking out the source, or make all), but it can also run arbitrary commands. A builder is configured with a list of workers that it can use to carry out its task. The other fundamental piece of information that a builder needs is, of course, the list of things it has to do (which will normally run on the chosen worker). In Buildbot, this list of things is represented as a BuildFactory object, which is essentially a sequence of steps, each one defining a certain operation or command. Enough talk, let’s see an example. For this example, we are going to assume that our super software project can be built using a simple make all, and there is another target make packages that creates rpm, deb and tgz packages of the binaries. In the real world things are usually more complex (for example there may be a configure step, or multiple targets), but the concepts are the same; it will just be a matter of adding more steps to a builder, or creating multiple builders, although sometimes the resulting builders can be quite complex. So to perform a manual build of our project we would type this from the command line (assuming we are at the root of the local copy of the repository): $ make clean # clean remnants of previous builds ... $ svn update ... $ make all ... $ make packages ... # optional but included in the example: copy packages to some central machine $ scp packages/*.rpm packages/*.deb packages/*.tgz someuser@somehost:/repository ... Here we’re assuming the repository is SVN, but again the concepts are the same with git, mercurial or any other VCS. 
Now, to automate this, we create a builder where each step is one of the commands we typed above. A step can be a shell command object, or a dedicated object that checks out the source code (there are various types for different repositories, see the docs for more info), or yet something else: from buildbot.plugins import steps, util # first, let's create the individual step objects # step 1: make clean; this fails if the worker has no local copy, but # is harmless and will only happen the first time makeclean = steps.ShellCommand(name="make clean", command=["make", "clean"], description="make clean") # step 2: svn update (here updates trunk, see the docs for more # on how to update a branch, or make it more generic). checkout = steps.SVN(baseURL='svn://myrepo/projects/coolproject/trunk', mode="update", username="foo", password="bar", haltOnFailure=True) # step 3: make all makeall = steps.ShellCommand(name="make all", command=["make", "all"], haltOnFailure=True, description="make all") # step 4: make packages makepackages = steps.ShellCommand(name="make packages", command=["make", "packages"], haltOnFailure=True, description="make packages") # step 5: upload packages to central server. This needs passwordless ssh # from the worker to the server (set it up in advance as part of worker setup) uploadpackages = steps.ShellCommand(name="upload packages", description="upload packages", command="scp packages/*.rpm packages/*.deb packages/*.tgz someuser@somehost:/repository", haltOnFailure=True) # create the build factory and add the steps to it f_simplebuild = util.BuildFactory() f_simplebuild.addStep(makeclean) f_simplebuild.addStep(checkout) f_simplebuild.addStep(makeall) f_simplebuild.addStep(makepackages) f_simplebuild.addStep(uploadpackages) # finally, declare the list of builders. 
In this case, we only have one builder c['builders'] = [ util.BuilderConfig(name="simplebuild", workernames=['worker1', 'worker2', 'worker3'], factory=f_simplebuild) ] So our builder is called simplebuild and can run on either of worker1, worker2 and worker3. If our repository has other branches besides trunk, we could create another one or more builders to build them; in the example, only the checkout step would be different, in that it would need to check out the specific branch. Depending on how exactly those branches have to be built, the shell commands may be recycled, or new ones would have to be created if they are different in the branch. You get the idea. The important thing is that all the builders be named differently and all be added to the c['builders'] value (as can be seen above, it is a list of BuilderConfig objects). Of course the type and number of steps will vary depending on the goal; for example, to just check that a commit doesn’t break the build, we could include just up to the make all step. Or we could have a builder that performs a more thorough test by also doing make test or other targets. You get the idea. Note that at each step except the very first we use haltOnFailure=True because it would not make sense to execute a step if the previous one failed (ok, it wouldn’t be needed for the last step, but it’s harmless and protects us if one day we add another step after it). 1.4.1.3. Schedulers¶ Now this is all nice and dandy, but who tells the builder (or builders) to run, and when? This is the job of the scheduler, which is a fancy name for an element that waits for some event to happen, and when it does, based on that information decides whether and when to run a builder (and which one or ones). There can be more than one scheduler. I’m being purposely vague here because the possibilities are almost endless and highly dependent on the actual setup, build purposes, source repository layout and other elements. 
So a scheduler needs to be configured with two main pieces of information: on one hand, which events to react to, and on the other hand, which builder or builders to trigger when those events are detected. (It’s more complex than that, but if you understand this, you can get the rest of the details from the docs). A simple type of scheduler may be a periodic scheduler: when a configurable amount of time has passed, run a certain builder (or builders). In our example, that’s how we would trigger a build every hour: from buildbot.plugins import schedulers # define the periodic scheduler hourlyscheduler = schedulers.Periodic(name="hourly", builderNames=["simplebuild"], periodicBuildTimer=3600) # define the available schedulers c['schedulers'] = [hourlyscheduler] That’s it. Every hour this hourly scheduler will run the simplebuild builder. If we have more than one builder that we want to run every hour, we can just add them to the builderNames list when defining the scheduler and they will all be run. Or since multiple scheduler are allowed, other schedulers can be defined and added to c['schedulers'] in the same way. Other types of schedulers exist; in particular, there are schedulers that can be more dynamic than the periodic one. The typical dynamic scheduler is one that learns about changes in a source repository (generally because some developer checks in some change), and triggers one or more builders in response to those changes. 
Let’s assume for now that the scheduler “magically” learns about changes in the repository (more about this later); here’s how we would define it: from buildbot.plugins import schedulers # define the dynamic scheduler trunkchanged = schedulers.SingleBranchScheduler(name="trunkchanged", change_filter=util.ChangeFilter(branch=None), treeStableTimer=300, builderNames=["simplebuild"]) # define the available schedulers c['schedulers'] = [trunkchanged] This scheduler receives changes happening to the repository, and among all of them, pays attention to those happening in “trunk” (that’s what branch=None means). In other words, it filters the changes to react only to those it’s interested in. When such changes are detected, and the tree has been quiet for 5 minutes (300 seconds), it runs the simplebuild builder. The treeStableTimer helps in those situations where commits tend to happen in bursts, which would otherwise result in multiple build requests queuing up. What if we want to act on two branches (say, trunk and 7.2)? First we create two builders, one for each branch (see the builders paragraph above), then we create two dynamic schedulers: from buildbot.plugins import schedulers # define the dynamic scheduler for trunk trunkchanged = schedulers.SingleBranchScheduler(name="trunkchanged", change_filter=util.ChangeFilter(branch=None), treeStableTimer=300, builderNames=["simplebuild-trunk"]) # define the dynamic scheduler for the 7.2 branch branch72changed = schedulers.SingleBranchScheduler(name="branch72changed", change_filter=util.ChangeFilter(branch='branches/7.2'), treeStableTimer=300, builderNames=["simplebuild-72"]) # define the available schedulers c['schedulers'] = [trunkchanged, branch72changed] The syntax of the change filter is VCS-dependent (above is for SVN), but again once the idea is clear, the documentation has all the details. 
Another feature of the scheduler is that is can be told which changes, within those it’s paying attention to, are important and which are not. For example, there may be a documentation directory in the branch the scheduler is watching, but changes under that directory should not trigger a build of the binary. This finer filtering is implemented by means of the fileIsImportant argument to the scheduler (full details in the docs and - alas - in the sources). 1.4.1.4. Change sources¶ Earlier we said that a dynamic scheduler “magically” learns about changes; the final piece of the puzzle are change sources, which are precisely the elements in Buildbot whose task is to detect changes in the repository and communicate them to the schedulers. Note that periodic schedulers don’t need a change source, since they only depend on elapsed time; dynamic schedulers, on the other hand, do need a change source. A change source is generally configured with information about a source repository (which is where changes happen); a change source can watch changes at different levels in the hierarchy of the repository, so for example it is possible to watch the whole repository or a subset of it, or just a single branch. This determines the extent of the information that is passed down to the schedulers. There are many ways a change source can learn about changes; it can periodically poll the repository for changes, or the VCS can be configured (for example through hook scripts triggered by commits) to push changes into the change source. While these two methods are probably the most common, they are not the only possibilities; it is possible for example to have a change source detect changes by parsing some email sent to a mailing list when a commit happens, and yet other methods exist. The manual again has the details. 
To complete our example, here’s a change source that polls a SVN repository every 2 minutes: from buildbot.plugins import changes, util svnpoller = changes.SVNPoller(repourl="svn://myrepo/projects/coolproject", svnuser="foo", svnpasswd="bar", pollinterval=120, split_file=util.svn.split_file_branches) c['change_source'] = svnpoller This poller watches the whole “coolproject” section of the repository, so it will detect changes in all the branches. We could have said: repourl = "svn://myrepo/projects/coolproject/trunk" or: repourl = "svn://myrepo/projects/coolproject/branches/7.2" to watch only a specific branch. To watch another project, you need to create another change source – and you need to filter changes by project. For instance, when you add a change source watching project ‘superproject’ to the above example, you need to change: trunkchanged = schedulers.SingleBranchScheduler(name="trunkchanged", change_filter=filter.ChangeFilter(branch=None), # ... ) to e.g.: trunkchanged = schedulers.SingleBranchScheduler(name="trunkchanged", change_filter=filter.ChangeFilter(project="coolproject", branch=None), # ... ) else coolproject will be built when there’s a change in superproject. Since we’re watching more than one branch, we need a method to tell in which branch the change occurred when we detect one. This is what the split_file argument does, it takes a callable that Buildbot will call to do the job. The split_file_branches function, which comes with Buildbot, is designed for exactly this purpose so that’s what the example above uses. And of course this is all SVN-specific, but there are pollers for all the popular VCSs. But note: if you have many projects, branches, and builders it probably pays to not hardcode all the schedulers and builders in the configuration, but generate them dynamically starting from list of all projects, branches, targets etc. 
and using loops to generate all possible combinations (or only the needed ones, depending on the specific setup), as explained in the documentation chapter about Customization. 1.4.1.5. Reporters¶ Now that the basics are in place, let's go back to the builders, which is where the real work happens. Reporters are simply the means Buildbot uses to inform the world about what's happening, that is, how builders are doing. There are many reporters: a mail notifier, an IRC notifier, and others. They are described fairly well in the manual. One thing I've found useful is the ability to pass a domain name as the lookup argument to a mailNotifier, which allows you to take an unqualified username as it appears in the SVN change and create a valid email address by appending the given domain name to it: from buildbot.plugins import reporters mn = reporters.MailNotifier(fromaddr="buildbot@example.org", lookup="example.org") c['services'] = [mn] The manual has all the details. 1.4.1.6. Conclusion¶ Please note that this article has just scratched the surface; given the complexity of the task of build automation, the possibilities are almost endless. So there's much, much more to say about Buildbot. However, hopefully this is a preparation step before reading the official manual. Had I found an explanation like the one above when I was approaching Buildbot, I'd have had to read the manual just once, rather than multiple times. Hope this can help someone else. (Thanks to Davide Brini for permission to include this tutorial, derived from one he originally posted on his own site.)
https://docs.buildbot.net/current/tutorial/fiveminutes.html
CC-MAIN-2018-34
refinedweb
2,654
58.01
How to do custom pagination in Python (Django) on list? : We want to paginate with 3 data in each page. We have Paginator class in django, so we use paginator class such as: paginator = Paginator(data, 3)# 3 here is limit number, you can give dynamic value changing it to your required limit number Import Paginator class from from django.core.paginator import Paginator Now, objects of paginator class is breakdown as: objects = paginator.page(1)# 1 here is page number, you can give dynamic value changing it to your required page numberreturn list(objects) This should give you first 3 data as : Thats all! I have used this pagination on list needs to be curated from multiple query in django here: Hoping this will help!
https://sonikabaniya.medium.com/how-to-do-custom-pagination-in-python-django-on-list-88402be38b6d?source=post_internal_links---------4----------------------------
CC-MAIN-2021-31
refinedweb
126
51.18
Difference between revisions of "BeagleBoard/GSoC/Meetings/20100621" From eLinux.org < BeagleBoard | GSoC | Meetings Revision as of 22:40, 12 April 2011 Contents Attendees - koen - Jefro - ppoudel - ssc - Katie - av500 - Crofton - cfriedt - topfs2 - notzed - maltanar - mru - drinkcat - eFfeM_work Agenda - Hardware review - Weekly reports - Lightning Talks - Problems building project - Blog titles not showing up in email - Documentation on the wiki Discussion Hardware - neo01124 still needs an XDS100v2 - no update Weekly Reports - jkridner1 (Jason) to evaluate Yahoo Pipes, feedmyinbox.com, and feedmailer.com - continued to next week, low priority Lightning Talks - several students have submitted/uploaded or will today - some need more than 5 minutes, is that ok? - students should put link to uploaded presentations on their project page - Q&A Weds, proposed same time slot as this meeting (2pm UTC = 7am PDT, 9am CDT, 4pm Europe time) on #beagleboard-gsoc Problems building project - ppoudel resolved namespace issues, has repository - maltanar has some runtime issues - Be sure to escalate any problems building your project. Documentation on the wiki - Students should upload instructions to build each project on those subpages, or link to repositories. - Basic build details are most important for now, descriptive & usage stuff to come, talk to Jefro w/any probs Actions - [Jefro] follow up with jkridner re XDS100v2 for neo01124 - [Jefro] determine deliverables for the mid-term in July - evals? code? documentation? - [all] Lightning Talks Q&A weds 1400UTC #beagleboard-gsoc Transcript (07:03:09 AM) The topic for #beagleboard-gsoc is: BeagleBoard.org Google Summer of Code (07:03:13 AM) ssc [c05b3c0a@gateway/web/freenode/ip.192.91.60.10] entered the room. 
(07:03:25 AM) ssc: Hi - Sorry for being a bit late (07:03:31 AM) Jefro: good morning all (07:03:38 AM) maltanar: greetings (07:03:54 AM) ppoudel: good morning (07:05:16 AM) topfs2: morning (07:05:31 AM) mru: greetings, earthlings (07:06:25 AM) koen: ah, Jefro is here now :) (07:06:31 AM) koen: at least one volunteer less needed (07:06:52 AM) Jefro: :) (07:07:00 AM) Jefro: no jkridner yet? (07:07:10 AM) katie: jkridner is on a plane (07:07:12 AM) av500: on a plane (07:07:21 AM) ***av500 hopes no snakes (07:07:40 AM) topfs2: hell that was a bad movie :S (07:07:51 AM) Jefro: I think he lives on planes (07:08:14 AM) cfriedt: i'm so sick of flying... (07:08:48 AM) Crofton: heh (07:08:52 AM) Crofton: I am in frankfurt .... (07:09:04 AM) Crofton: you are in Canada again? (07:09:11 AM) notzed: flying gets old very fast (07:09:30 AM) Crofton: especially with several fussy kids nearby .... (07:09:50 AM) koen: topfs2: I use SOAP to test dvd playback on beagle :) (07:10:10 AM) cfriedt: me, yea... (07:10:22 AM) topfs2: koen, you share a dvd drive to the beagle? (07:10:26 AM) ***notzed sees a dog, a sudsy bath and a shiny disk in his minds eye (07:10:31 AM) koen: topfs2: usb dvd drive (07:10:58 AM) topfs2: isn't soap that internet thingy? (07:11:11 AM) ***Jefro doesn't fly much any more (07:11:43 AM) Jefro: should we get started? taking roll call from banter, still missing a few (07:12:15 AM) ***maltanar is here (07:12:20 AM) drinkcat: here (07:12:25 AM) Jefro: drinkcat, cmurillo, eFfeM_work (07:12:29 AM) Jefro: ah, hello drinkcat (07:12:47 AM) ***topfs2 here (07:12:50 AM) cmurillo: Jefro: morning! (07:12:51 AM) ***katie here (07:13:04 AM) Jefro: good morning! (UGT) (07:13:10 AM) ppoudel: ppoudel here (07:14:03 AM) Jefro: roll call going once.. going twice.. need another 30 sec to put agenda together (07:14:40 AM) cfriedt: present (07:15:07 AM) eFfeM_work: hey Jefro, i'm here (07:16:10 AM) koen: Jefro: gm! 
(07:16:27 AM) Crofton: I'm here, for now at least (07:16:40 AM) Crofton: I need to catch a bus about 5 (07:16:44 AM) Jefro: ok, thanks - agenda now at (07:16:54 AM) Jefro: it looks very suspiciously like last week's agenda (07:17:14 AM) eFfeM_work: :-) (07:17:16 AM) Jefro: now adding eFfeM_work, think I got everyone else (07:17:57 AM) Jefro: shall we start with hardware? last week neo01124 needed an XDS100v2 (07:18:28 AM) ssc: I haven't heard anything more about that, and it seems, that he unfortunately isn't here today (07:19:04 AM) av500: he was on irc ~2h ago (07:19:11 AM) Jefro: ok - will continue to next week. I'll also take an action to mention it to jkridner to see what we can do. (07:20:01 AM) Jefro: I also have down "drinkcat isn't familiar with hardware JTAG-based debugging tools", any progress? (07:20:40 AM) drinkcat: I don't need one (07:21:03 AM) mru: :-) (07:21:12 AM) Jefro: check (07:21:41 AM) Jefro: I like bullet items that are easy to check off the list (07:22:21 AM) Jefro: speaking of which, I'm just going to buzz through this agenda and then we can come back to anything that needs more discussion (07:22:51 AM) maltanar: sounds good :) (07:23:04 AM) Jefro: I don't think jkridner has evaluated any new tools for the Weekly Reports, but that is a low priority item - will just continue to next week (07:23:05 AM) eFfeM_work: ok (07:23:23 AM) Jefro: The Lightning Talks are much more pressing. Has anyone sent in a talk yet? (07:23:33 AM) drinkcat: where do we need to send them? (07:23:44 AM) topfs2: and what to send them as (07:23:49 AM) maltanar: I've mailed mine to my mentors, jkridner and one more person whose mail I didn't recognize (07:24:04 AM) maltanar: can't access youtube, so someone else will have to upload mine for me (07:24:19 AM) koen: maltanar: can you access vimeo? (07:24:27 AM) maltanar: hmm, I think so, let me check (07:24:28 AM) ppoudel: I will send it after the meeting. 
(07:24:42 AM) topfs2: I have audio and a presentation, have you others sent video? (07:24:43 AM) maltanar: koen: yes, I appearantly can :) (07:24:49 AM) koen: what do people think of using the lightning talk movies to send a summary to the main beagle list? (07:24:50 AM) topfs2: i.e. readily done video? (07:25:05 AM) maltanar: I used Windows Movie Maker to put a video together (07:25:15 AM) topfs2: goodie, I'll do that quick and send it then (07:25:18 AM) ***maltanar puts on asbestos clothes (07:25:55 AM) drinkcat: topfs2: I have a video too (managed to do it with ffmpeg/mencoder somehow) (07:26:15 AM) Jefro: maltanar LOL - note that in industry, people normally use the tools they need to get jobs done. (07:26:36 AM) Jefro: very glad to see the activity around videos, jkridner will be most pleased (07:26:54 AM) maltanar: mine was about 7 minutes though - will that be a problem? (07:27:07 AM) ppoudel: I tried "camtasia ", seem to work good for this lightinig talk. (07:27:07 AM) av500: drinkcat: extra bonus points for using ffmpeg (07:27:07 AM) koen: Jefro: I thought in the industry you'd turn it into a stretch goal, mumble about ROI and forget about it? (07:27:10 AM) Jefro: it would be good to cut it down to 5, as most Lightning Talks are limited that way (07:27:32 AM) Jefro: koen ha.. depends on the company. the ones where I worked that did that aren't around any more. (07:27:54 AM) drinkcat: Jefro: Well, I have 6.30, in the worst case, I'll accelerate the video..... (07:28:20 AM) Jefro: drinkcat ROFL - you'll look very excited about your project (and like you had too much coffee) (07:28:37 AM) topfs2: Well mine is 4 or something so you can have a miniute :) (07:28:48 AM) drinkcat: topfs2: thanks, lol (07:29:40 AM) Jefro: in my experience, there is often a lull in the middle of most presentations that contains some material to cut. if you desperately need 6 or 7 minutes, it is possible they'll give it to you, but normally in Lightning talks (as in Ignite! 
talks) anything over 5 minutes is simply cut. (07:29:58 AM) Jefro: we'll have to ask Jason, as he runs these talks (07:30:21 AM) drinkcat: "Duration should only be 5 minutes."... I see should, not MUST ,-) (07:30:38 AM) Jefro: drinkcat yep - it definitely depends on the person running the show. (07:30:38 AM) maltanar: I had noticed there were some in the previous years which went for ~8 mins and thought it wouldn't matter, but I can try to snip off things here and there in the other case (07:31:10 AM) Jefro: maltanar really? I only attended the one last fall and didn't see any that long. (07:31:21 AM) Jefro: I suggest not sweating too much about it. (07:31:33 AM) Jefro: jkridner proposed a Q&A on this channel on Weds (07:31:36 AM) eFfeM_work: speed it up, everyone think you're donald duck (07:32:08 AM) Jefro: no time was announced - if this time slot works on Weds, I'll propose that. (07:32:13 AM) ppoudel: Is is this channel or on beagle? (07:32:22 AM) ppoudel: Is it* (07:32:34 AM) Jefro: for the lightning talks I would think here rather than in the main channel (07:32:52 AM) Jefro: unless there is something that would be of benefit for the other 200 chatters to read (07:32:55 AM) av500: main channel might have too much noise at the same time (07:33:09 AM) av500: we can still invite ppl over (07:33:32 AM) Jefro: yep (07:35:07 AM) Jefro: excellent - can't wait to see your presentations (07:35:10 AM) topfs2: isn't the point to get people interested? (07:35:22 AM) topfs2: i.e. normal people? (07:35:37 AM) drinkcat: topfs2: define "normal people" (07:35:44 AM) ***mru feels normal (07:35:53 AM) topfs2: "normal" as in those in #beagle aswell :) (07:35:58 AM) Jefro: topfs2 yes, but this q&a is for working out details & problems. we can announce the talks to the "normal people" when they are done, and hopefully get them to come to the Lightning Talks. (07:36:07 AM) topfs2: ooh, ok (07:37:10 AM) Jefro: anything more on lightning talks? 
be sure to put links to uploaded videos on your project page (07:37:30 AM) drinkcat: where do we need to send them? (07:38:17 AM) ppoudel: Are we going to upload it ourself or send to jkridner? (07:38:33 AM) Jefro: if you have a video, it is probably best to upload it to youtube or vimeo and just send a link (07:39:03 AM) Jefro: (if you have trouble with either of those, let me know) (07:39:29 AM) Jefro: if you have slides & audio, you can probably just send them to jkridner and your mentor(s) (07:39:30 AM) ppoudel: Jefro: sure. (07:39:51 AM) Jefro: anyone planning to do it live? (07:41:08 AM) Jefro: the time of day is not great for Europe (07:41:23 AM) maltanar: when will it be? (07:42:08 AM) ***Jefro tries to check... (07:42:35 AM) drinkcat: 4pm? Not too bad for Europe... (07:42:44 AM) Jefro: I can't find a link to the date, but believe it is during the business day in the US, probably 9am - 4pm (drinkcat, got a link?) (07:43:10 AM) Jefro: probably not bad if you normally work & stay up late (07:43:12 AM) drinkcat: sorry I thought it would be the same time as this meeting (07:44:02 AM) Jefro: 9am Texas time is... 2pm zulu, I think (07:44:54 AM) drinkcat: well, if you move it earlier, then it's too early for US... (07:45:19 AM) Jefro: yep. (this meeting starts at 7am for me) (07:45:32 AM) Crofton left the room (quit: Quit: Leaving). (07:45:45 AM) ssc: I will unfortunately have to leave as well within 10-15 minutes (07:46:58 AM) Jefro: ssc no prob, I think we are getting close to the finish line (07:47:25 AM) Jefro: moving on unless anyone has anything more about lightning talks (07:47:37 AM) Jefro: any problems building that we can hammer out here? 
(07:47:52 AM) koen: Jefro: it's now 16:47 in most of europe (07:48:28 AM) av500: 48 (07:48:41 AM) Jefro: koen thanks (07:49:39 AM) eFfeM_work: have to leave now, sry, later (& will read log) (07:49:54 AM) Jefro: no prob, thanks eFfeM_work (07:50:02 AM) Jefro: ppoudel, you reported namespace issues last week, were those resolved? (07:50:14 AM) ppoudel: I am good (07:50:21 AM) ppoudel: resolved it (07:50:54 AM) maltanar: I broke some pieces of C6RunApp while integrating my own changes, but I'll fix them this week (07:51:48 AM) Jefro: ppoudel great, glad to hear (07:52:02 AM) Jefro: maltanar breaking the build is pretty common in the early stages (07:52:04 AM) ppoudel: Jefro: Thanks (07:52:26 AM) maltanar: not really build issues - it builds fine but C I/O won't work (07:52:52 AM) maltanar: probably something about DSP/Link config or calling something at the wrong time (07:52:53 AM) Jefro: runtime issues, fun :) (07:52:58 AM) av500: ppoudel: btw, nice for being one of the 1st ppl on the BB writing dsplink/CE code :) (07:53:13 AM) av500: finally I have an url to point ppl to... (07:53:21 AM) av500: so, please make good docs :) (07:53:59 AM) Jefro: yes, definitely. for those who don't know, I am actually a technical writer rather than a developer, and I'd be happy to help with any documentation issues. (07:54:01 AM) ppoudel: av500: oh is it? I did not know. thanks (07:54:53 AM) ppoudel: av500: Ya sure. I will give my best shot. (07:55:56 AM) ppoudel: Jefro: Thats a good news for us. Most of the time it takes hours for me to write simple stuff. I may need your help sometime. (07:56:14 AM) Jefro: ppoudel I'd be happy to help (07:56:38 AM) ppoudel: Jefro: Thanks (07:57:00 AM) Jefro: getting close to the end of our time. any other build issues? 
(07:57:38 AM) Jefro: silence is golden :) (07:58:01 AM) koen: av500: don't forget iUniversal (07:58:24 AM) av500: koen: yep (07:58:36 AM) Jefro: the only other agenda item is a reminder to spend a little quality time with your projects page. all of them are linked at and all should at this point have basic build instructions, or a link to them on your own page. (07:59:15 AM) notzed: mid terms are coming up too ... (07:59:55 AM) Jefro: notzed midterms are one of the things I don't miss from college :) (08:00:29 AM) ssc: Have to leave - See you - SSC (08:00:34 AM) ssc left the room. (08:00:41 AM) Jefro: bye ssc (08:01:00 AM) notzed: fortunately, i don't think there's anything i miss from uni (08:01:35 AM) Jefro: I'm glad to see basic documentation in most of the pages. take a look at the USB Sniffer page for a test table also, very nice (08:01:44 AM) koen: indeed (08:02:11 AM) drinkcat: what are the deliverables for the midterm? code, and build instructions for sure. Some documentation as well? (08:02:12 AM) topfs2: Got to love sweden uni, rarely have mid terms :) (08:02:19 AM) mru: :-) (08:02:29 AM) mru: topfs2: which uni are you in? (08:02:30 AM) cfriedt: midterms are sometimes a good thing (08:02:41 AM) Jefro: drinkcat good question - I don't know but will follow up (08:02:43 AM) topfs2: mru, Lunds University, LTH (08:02:48 AM) mru: bah, kth ftw (08:02:49 AM) topfs2: you? (08:02:51 AM) topfs2: haha (08:02:52 AM) ***maltanar misses not having midterms in Uppsala :( (08:03:21 AM) mru: we had exams 4 times per year (08:03:23 AM) mru: more or less (08:03:44 AM) ***cfriedt will never have to write another (academic) exam for the rest of his life :) (08:03:58 AM) maltanar: echoing drinkcat's question - what to we deliver for the mid-term? (08:04:05 AM) maltanar: if anyone has any info? 
(08:04:14 AM) Jefro: maltanar - I have an action item to find out & follow up (08:04:16 AM) mru: cfriedt: but you have a gsoc midterm coming up (08:04:26 AM) av500: much harder to pass (08:04:28 AM) mru: I *will* need more than a float array multiplication (08:04:34 AM) cfriedt: naturally (08:04:37 AM) Jefro: I believe it is just mentor evaluations of each project (08:05:34 AM) Jefro: and student evaluations of each mentor as well (08:06:02 AM) Jefro: I don't know whether there is a specific requirement for code delivery - basic google page doesn't specify (08:07:11 AM) Jefro: we can discuss next week, hopefully jkridner will be back (08:07:19 AM) Jefro: I think we are officially done - meeting adjourned, thanks all (08:07:26 AM) drinkcat: thanks. (08:07:30 AM) drinkcat: good evening all (08:07:36 AM) ppoudel: thanks (08:07:46 AM) maltanar: thanks, good evening/morning all! (08:07:54 AM) maltanar left the room. (08:08:00 AM) notzed: morning all (08:08:16 AM) topfs2: hey thats not UGT! ;) (08:08:59 AM) notzed: well it's not often you get a meeting that starts in the evening and finishes in the morning, so may as well use it (08:09:12 AM) topfs2: hehe :)
https://elinux.org/index.php?title=BeagleBoard/GSoC/Meetings/20100621&diff=prev&oldid=42559
CC-MAIN-2017-43
refinedweb
3,143
71.18
On 31 January 2014 03:47, Andrew Barnert abarnert@yahoo.com wrote: On Jan 30, 2014, at 17:32, Chris Angelico rosuav@gmail.com wrote: On Fri, Jan 31, 2014 at 12:07 PM, Steven D'Aprano steve@pearwood.info wrote: One of my aims is to avoid raising TypeError unnecessarily. The statistics module is aimed at casual users who may not understand, or care about, the subtleties of numeric coercions, they just want to take the average of two values regardless of what sort of number they are. But having said that, I realise that mixed-type arithmetic is difficult, and I've avoided documenting the fact that the module will work on mixed types. Based on the current docs and common sense, I would expect that Fraction and Decimal should normally be there exclusively, and that the only type coercions would be int->float->complex (because it makes natural sense to write a list of "floats" as [1.4, 2, 3.7], but it doesn't make sense to write a list of Fractions as [Fraction(1,2), 7.8, Fraction(12,35)]). Any mishandling of Fraction or Decimal with the other three types can be answered with "Well, you should be using the same type everywhere". (Though it might be useful to allow int->anything coercion, since that one's easy and safe.) Except that large enough int values lose information, and even larger ones raise an exception: >>> float(pow(3, 50)) == pow(3, 50) False >>> float(1<<2000) OverflowError: int too large to convert to float And that first one is the reason why statistics needs a custom sum in the first place. When there are only 2 types involved in the sequence, you get the answer you wanted. The only problem raised by the examples in this thread is that with 3 or more types that aren't all mutually coercible but do have a path through them, you can sometimes get imprecise answers and other times get exceptions, and you might come to rely on one or the other. 
So, rather than throwing out Stephen's carefully crafted and clearly worded rules and trying to come up with new ones, why not (for 3.4) just say that the order of coercions given values of 3 or more types is not documented and subject to change in the future (maybe even giving the examples from the initial email)? You're making this sound a lot more complicated than it is. The problem is simple: Decimal doesn't integrate with the numeric tower. This is explicit in the PEP that brought in the numeric tower: See also this thread (that I started during extensive off-list discussions about the statistics.sum function with Steven): Decimal makes the following concessions for mixing numeric types: 1) It will promote integers in arithmetic. 2) It will compare correctly against all numeric types (as long as FloatOperation isn't trapped). 3) It will coerce int and float in its constructor. The recently added FloatOperation trap suggests that there's more interest in prohibiting the mixing of Decimals with other numeric types than facilitating it. I can imagine getting in that camp myself: speaking as someone who finds uses for both the fractions module and the decimal module I feel qualified to say that there is no good use case for mixing these types. Similarly there's no good use-case for mixing floats with Fractions or Decimals although mixing float/Fraction does work. If you choose to use Decimals then it is precisely because you do need to care about the numeric types you use and the sort of accuracy they provide. If you find yourself mixing Decimals with other numeric types then it's more likely a mistake/bug than a convenience. In any case the current implementation of statistics._sum (AIUI, I don't have it to hand for testing) will do the right thing for any mix of types in the numeric tower. It will also do the right thing for Decimals: it will compute the exact result and then round once according to the current decimal context. 
It's also possible to mix int and Decimal but there's no sensible way to handle mixing Decimal with anything else. If there is to be a documented limitation on mixing types then it should be explicitly about Decimal: The statistics module works very well with Decimal but doesn't really support mixing Decimal with other types. This is a limitation of Python rather than the statistics module itself. That being said I think that guaranteeing an error is better than the current order-dependent behaviour (and agree that that should be considered a bug). If there is to be a more drastic rearrangement of the _sum function then it should actually be to solve the problem that the current implementation of mean, variance etc. uses Fractions for all the heavy lifting but then rounds in the wrong place (when returning from _sum()) rather than in the mean, variance function itself. The clever algorithm in the variance function (unless it changed since I last looked) is entirely unnecessary when all of the intensive computation is performed with exact arithmetic. In the absence of rounding error you could compute a perfectly good variance using the computational formula for variance in a single pass. Similarly although the _sum() function is correctly rounded, the mean() function calls _sum() and then rounds again so that the return value from mean() is rounded twice. _sum() computes an exact value as a fraction and then coerces it with return T(total_numerator) / total_denominator so that the division causes it to be correctly rounded. However the mean function effectively ends up doing return (T(total_numerator) / total_denominator) / num_items which uses 2 divisions and hence rounds twice. It's trivial to rearrange that so that you round once return T(total_numerator) / (total_denominator * num_items) except that to do this the _sum function should be changed to return the exact result as a Fraction (and perhaps the type T). 
Similar changes would need to be made to the some of squares function (_ss() IIRC). The double rounding in mean() isn't a big deal but the corresponding effect for the variance functions is significant. It was after realising this that the sum function was renamed _sum and made nominally private. To be clear, statistics.variance(list_of_decimals) is very accurate. However it uses more passes than is necessary and it can be inaccurate in the situation that you have Decimals whose precision exceeds that of the current decimal context e.g.: import decimal d = decimal.Decimal('300000000000000000000000000000000000000000') d Decimal('300000000000000000000000000000000000000000') d+1 # Any arithmetic operation loses precision Decimal('3.000000000000000000000000000E+41') +d # Use context precision Decimal('3.000000000000000000000000000E+41') If you're using Fractions for all of your computation then you can change this since no precision is lost when calling Fraction(Decimal): import fractions fractions.Fraction(d)+1 Fraction(300000000000000000000000000000000000000001, 1) Oscar
https://mail.python.org/archives/list/python-ideas@python.org/message/XQ5IAQPU4A5D2W6XDTIPA2GIONUWZBWI/
CC-MAIN-2021-49
refinedweb
1,163
59.03
PyFrameObject.f_gen is a pointer (not a reference) to a generator/coroutine object. But the latter doesn't always correctly clean it up when it dies. This pointer is used by frame.clear(). Here is an example I made, which ends in a segfault. This example assumes we apply the patch of issue27811 first, otherwise it just crashes earlier in the same way as issue27811. # execute this with "python -Werror" import gc async def f(): pass cr = f() frame = cr.cr_frame del cr gc.collect() # create some randomness to reuse the memory just freed by 'cr' import asyncio print("ping") frame.clear() Patch attached. No test, but you can copy the above example.
https://bugs.python.org/msg273200
CC-MAIN-2021-04
refinedweb
113
69.28
Creating steps New in v12.9.0 Similar to a Page, grouping can be achieved by using the Step class. Other than with pages, with steps the developer can dictate the order of pages to be visited: the user is not able to move freely between pages but moves to the next/previous step with a next/previous button. Page and Step cannot be combined within a single parametrization. A Step is defined in the parametrization similar to a Page. The labels of the next and previous buttons can be set per step: from viktor.parametrization import ViktorParametrization, Step, TextField, NumberFieldclass Parametrization(ViktorParametrization): step_1 = Step('Step 1 - without views', next_label="Go to step 2") step_1.input_1 = TextField('This is a text field') step_1.input_2 = NumberField('This is a number field') step_2 = Step('Step 2 - with views', views=['geometry_view', 'data_view'], previous_label="Go to step 1") step_2.input_1 = TextField('This is a text field') step_2.input_2 = NumberField('This is a number field')
https://docs.viktor.ai/docs/create-apps/layout-and-styling/steps/
CC-MAIN-2022-40
refinedweb
160
66.94
import "github.com/xiaonanln/goworld/engine/lib/gwsnappy" Package snappy implements the snappy block-based compression format. It aims for very high speeds and reasonable compression. The C++ snappy implementation is at decode.go decode_amd64.go encode.go encode_amd64.go snappy.go var ( //. DecodedLen returns the length of the decoded block.. MaxEncodedLen returns the maximum length of a snappy block, given its uncompressed length. It will return a negative value if srcLen is too large to encode. Reader is an io.Reader that can read Snappy-compressed bytes. NewReader returns a new Reader that decompresses from r, using the framing format described at Read satisfies the io.Reader interface. Reset discards any buffered data, resets all state, and switches the Snappy reader to read from r. This permits reusing a Reader rather than allocating a new one. Writer is an io.Writer that can write Snappy-compressed bytes.. Close calls Flush and then closes the Writer. Flush flushes the Writer to its underlying io.Writer. Reset discards the writer's state and switches the Snappy writer to write to w. This permits reusing a Writer rather than allocating a new one. Write satisfies the io.Writer interface. Package gwsnappy imports 8 packages (graph) and is imported by 1 packages. Updated 2017-11-06. Refresh now. Tools for package owners.
https://godoc.org/github.com/xiaonanln/goworld/engine/lib/gwsnappy
CC-MAIN-2018-30
refinedweb
219
52.97
Client Side Bean Validation Idea's from the PastJay Balunas Apr 29, 2010 2:27 PM Hi Guys, After our team meeting I gave emmanuel the transcript. He recalled a very similar conversation way back in 2008. Alex please review this and determine where this fits in. Many of the concepts are the same, although some of the details & JSF have changed. Here is a except from a mail by Pete from the october 13h 2008 (older than I thought). This was designed by Pete and I and reviewed by a few of the BV and JSF EG. D) TRUE CLIENT SIDE VALIDATION 10) Trinidad Client Side Validation framework Standardize this in order to implement client side validation in JSF; with this, we can then support client side constraints from JSR-303. Further, the Trinidad client side validation and conversion would need some (simple) extensions [1] to work well with model validation. For the purposes of this discussion, let's refer to this as "Trinidad client side validation and conversion". IS THERE ANYONE WHO KNOWS THE TRINIDAD PROJECT WELL WHO CAN STEP UP TO PRODUCE THIS PROPOSAL? 11) What JSR-303 provides * Bean Validator exposes metadata about a particular constraint. Constrains can be composed of other constraints, thus allowing other validation in other layers to be built up and allowing fine grained constraints. * If you define your own constraints you can add compose it from built in constraints, and your own constraints. Constraints can be composed of constraints that are themselves compositions You can read more at With this information, we can provide client side validators that will validate all built in constraints to JSR303; provide some validation when people compose constraints from built-in constraints, and also allow people to write their own JS to implement custom constraints. 
12) What JSF needs to specify in addition to the Trinidad Client Side Validation Framework * JSR-314 should map each a resource, accessed by a GET request, in the javax.faces.validator namespace, to a JS resource which implements the constraint. I propose the following resource URLs (Roger Kitian, does this match the JSF2 ajax stuff, or should we be using . rather / to delimit packages?) /<context-root>/javax/faces/validator/<validator-class-fqn>.js For example: /foo/javax/faces/validator/javax/validator/notNull.js /foo/javax/faces/validator/javax/validator/length.js /foo/javax/faces/validator/com/acme/validator/ukPostcode.js * We propose a simple convention based system, where this JS is stored in the correct package, and loaded by JSF. Thus the notNull.js would be loaded by the classloader as javax/validator/notNull.js. This removes the need to have any constraint registry * The core implementation of a constraint in JS returns true/false. Inline javascript will be to call the constraints. * A JSR-314 implementation would be required to implement the built in constraints in JSR-303 in JS. * The Trinidad client side validation and conversion framework allows the instance of the validator to describe: - a number of JS imports (see extension (a)). A list of resources using the schema described above is returned. - some inline JS. A JS constructor function should be built automatically by the JSF2 implementation, such that it calls the each metadata type validators (which are imported). If any return false, the function should create a TrFacesMessage (part of the Trinidad client side validation and conversion framework), providing for interpolation the piece of metadata which failed, the label and the submitted value. As no two fields have identical sets of metadata types, we need to build a unique function for each field. The constructor function should have a unique name based on the client id of the input component. 
- A JS constructor, which passes in the values to be interpolated. This would just call the unique constructor. OPEN ISSUE: we could merge the above into this function call. - The Model Validator would provide the value and a message to be interpolated. Interpolation of parameters and the label would be done on the server side, producing a semi-expanded string. 13) Error messages * To accommodate contextual constraint messages that cannot be created on the client side, we require the constraint to use a bundle key as the message. * This key is resolved both in a "server" bundle and a "client" bundle. If a key cannot be found in the "client" bundle, the "server" bundle is used. * The locale to use will be determined on the server, by the selected locale for the request. * If the BV constraint is composed, then one JS validation is created for each composing and composed constraint. * If the BV constraint is composed but ought to generate a singe error, then the rendered inline JS function calls all the composing constraints. * This approach works if only one possible message is generated per constraint (BV allows you to give out multipple) - If different messages can be generated by a given constraint, the list of possible message keys will have to be exposed by the JS constraint implementation. The eager resolution is now possible. - If multiple messages can be generated by a given constraint at the same time (ie several errors per constraint), then that's harder. I imagine JSF could artificially concatenate. Does not seem great. OPEN ISSUE. 14) Extensions to the Trinidad client side validation and conversion framework needed for JSR303 integration: (a) Alter the signature of ClientValidator:public String getClientLibrarySource(FacesContext context); to ClientValidator:public List<String> getClientLibrarySource(FacesContext context); to allow multiple JS files to be imported for use in this validator. 
JSF2 is clever enough to detect duplicate JS imports and remove them, I believe. 1. Re: Client Side Bean Validation Idea's from the Past — varun shaji, Feb 15, 2011 12:34 AM (in response to Jay Balunas) Found the implementation going on in Richfaces 4.. Gr8!!
https://developer.jboss.org/message/540221
CC-MAIN-2016-36
refinedweb
968
53.31
This what you are expecting to do with this shield, then you can vote and tell me what you want to see instead on this motor shield :-). OEM or Original? The original Arduino Motor/Stepper/Servo Shield is available from Adaftruit Industries and costs less than $20. I’m using a OEM version, see this link. The functionality is the same, except that the OEM version only runs with motors up to 16 VDC, while the original shield is for motors up to 25 VDC. The board has two STMicroelectronics L293D Motor H-Bridge IC’s which can drive up to 4 DC motors (or up to 2 stepper motors) with 0.6 A per bridge (1.2 A peak). The 74HCT595N (my board has the SN74HC595 from Texas Instrument) is a shift register used for the H-Bridges to reduce the number of pins needed (more about this in a next post). A terminal block with jumper is providing power to the DC/stepper motor. The 5 VDC for the servos is taken from the FRDM board. ❗ The FRDM-KL25Z can only give a few hundred mA on the 5V Arduino header. That works for small servos, but I recommend to cut the 5V supply to the servos and use a dedicated 5V (or 6V) for the servos. Outline In this tutorial, I’m creating a project with CodeWarrior for MCU10.4 for the FRDM-KL25Z board, and then add support for two servo motors. Processor Expert Components This tutorial uses added Processor Expert components which are not part of CodeWarrior distribution. The following other components are used: - Wait: allows waiting for a given time - Servo: high level driver for hobby servp motors Make sure you have the latest and greatest components loaded from GitHub. Instructions how to download and install the additional components can be found here. 
Creating CodeWarrior Project To create a new project in CodeWarrior: - File > New > Bareboard Project, give a project name - Specify the device to be used: MKL25Z128 - OpenSDA as connection - I/O support can be set to ‘No I/O’ - Processor Expert as Rapid Application Development option This creates the starting point for my project: Servo Motor Servo motors are used in RC (Radio Control) or (hobby) robotics. The motor has 3 connectors: - GND (Black) - Power (Red), typically 5V, but can be 6V or even higher - PWM (White or Yellow), signal for position information The PWM signal typically has frequency of 50 Hz (20 ms), with a duty (high duration) between 1 ms and 2 ms. The screenshot below shows such a 50 Hz Signal with 1.5 ms duty cycle (servo middle position): 💡 Many servos go below 1 ms and beyond 2 ms. E.g. many Hitec servos have a range of 0.9…2.1 ms. Check the data sheet of your servos for details. If you do not have a data sheet, then you might just experiment with different values. With a PWM duty of 1 ms to 2 ms within a 20 ms period, this means that only 10% of the whole PWM duty are used. This means if you have a PWM resolution of only 8bits, then only 10% of 256 steps could be used. As such, an 8bit PWM signal does not give me a fine tuned servo positioning. The duration of the duty cycle (1..2 ms) is translated into a motor position. Typically the servo has a built-in closed-loop control with a microcontroller and a potentiometer. 💡 I have found that it is not important to have an *exact* 50 Hz PWM frequency. You need to experiment with your servo if it works as well with a lower or higher frequency, or with non-fixed frequency (e.g. if you do a software PWM). Many servos build an average of the duty cycle, so you might need to send several pulses until the servo reacts to a changed value. 
Servo Processor Expert Component I’m using here my own ‘Servo’ component which offers following capabilities: - PWM configuration (duty and period) - Min/Max and initialization values - Methods to change the duty cycle - Optional command line shell support: you can type in commands and control the servo. This is useful for testing or calibration. - Optional ‘timed’ moving, so you can move the servo faster or slower to the new position in an interrupt driven way 💡 Of course it is possible to use servos without any special components. From the Components view, I add the Servo component. To add it to my project, I can double-click on it or use the ‘+’ icon in that view: 💡 In case the Processor Expert views are not shown, use the menu Processor Expert > Show Views This will add a new ‘Servo’ component to the project: But it shows errors as first the PWM and pin settings need to be configured. PWM Configuration On the Arduino Motor/Stepper/Servo shield the two Servo motor headers are connected to PWM1B and PWM1A (see schematic): Following the signals, this ends up at following pins on the KL25Z: - Servo 1 => PWM1B => Arduino Header D10 => FRDM-KL25Z D10 => KL25Z pin 73 => PTD0/SPI0_PCS0/TPM0_CH0 - Servo 2 => PWM1A => Arduino Header D9 => FRDM-KL25Z D9 => KL25Z pin 78 => ADC0_SE6b/PTD5/SPI1_SCK/UART2_TX/TPM0_CH5 From the pin names on the Kinets (TPM0_CH0 and TPM0_CH5) I can see that this would be the same Timer (TPM0), but with different channel numbers (CH0 and CH5). For my first servo Processor Expert has created for me a ‘TimerUnit_LDD’ which I will be able to share (later more on this). The TimerUnit_LDD implements the ‘Logical Device Driver’ for my PWM: So I select the PWM component inside the Servo component and configure it for TPM0_C0V and the pin PTD0/SPI0_PCS0/TPM0_CH0 with low initial polarity. 
The period of 20 ms (50 Hz) and starting pulse with of 1.5 ms (mid-point) should already be pre-configured: 💡 I recommend to give it a pin signal name (I used ‘Servo1’) ❗ That I need to set the ‘initial polarity’ to low is a bug of Processor Expert in my view: the device supports an initial ‘high’ polarity, but somehow this is not implemented? What it means is that the polarity of the PWM signal is now inverted: a ‘high’ duty cycle will mean that the signal is low. We need to ‘revert’ the logic later in the Servo component. Because of the inverted PWM logic, I need to set the ‘Inverted PWM’ attribute in the Servo component: The other settings of the Servo component we can keep ‘as is’ for now. The ‘Min Pos PWM’ and ‘Max Pos PWM’ define the range of the PWM duty cycle which we will use later for the servo position. Adding Second Servo As with the first servo, I add the second servo from the Components Library view. As I already have a TimerUnit_LDD present in my system, Processor Expert asks me if I want to re-use the existing one or to create a new component: As explained above: I can use the same timer (just a different pin/channel), so I have my existing component selected and press OK. As above, I configure the timer channel and pin with initial polarity: And I should not forget to enable the inverted logic: Test Application Time to try things out. For this I create a simple demo application which changes the position of both servos. First I add the Wait component to the project from the Components Library: As I have all my Processor Expert components configured, I can generate the code: Next I add a new header Application.h file to my project. 
For this I select the ‘Sources’ folder of my project and use the New > Header File context menu to add my new header file: In that header file Application.h I add a prototype for my application ‘run’ routine: From the main() in ProcessorExpert.c, I call that function (not to forget to include the header file): The same way I add a new source file Application.c: To test my servos, I’m using the SetPos() method which accepts a 8bit (0 to 255) value which is the position. To slow things a bit, I’m waiting a few milliseconds between the different positions: #include "Application.h" #include "WAIT1.h" #include "SERVO1.h" #include "SERVO2.h" void APP_Run(void) { uint16_t pos; for(;;) { for(pos=0;pos<=255;pos++) { SERVO1_SetPos(pos); SERVO2_SetPos(pos); WAIT1_Waitms(50); } } } Save all files, and we should be ready to try it out on the board. Build, Download and Run That’s it :-). Time to build the project (menu Project > Build Project) and to download it with the debugger (menu Run > Debug) and to start the application. If everything is going right, then the two servos will slowly turn in one direction until the end position, and then return back to the starting position. Summary Using hobby servo motors with the FRDM-KL25Z, CodeWarrior, Processor Expert and the additional components plus the Arduino/Stepper/Servo Shield is very easy in my view. I hope this post is useful to start your own experiments with hobby servo motors to bring any robotic project to the next level :-). I have here on GitHub a project which features what is explained in this post, but with a lot more components, bells and whistles :-). What’s next? Actually, I have several things in mind. But I let the readers of this post vote :-): Or simply post a comment about what you have in mind. The next tutorial is about timed servo moves. 
List of Tutorials - Tutorial: Arduino Motor/Stepper/Servo Shield – Part 1: Servos - Tutorial: Arduino Motor/Stepper/Servo Shield – Part 2: Timed Servo Moves - Tutorial: Arduino Motor/Stepper/Servo Shield – Part 3: 74HCT595 Shift Register Happy Servoing 🙂 Pingback: Tutorial: Arduino Motor/Stepper/Servo Shield – Part 2: Timed Servo Moves | MCU on Eclipse Pingback: Tutorial: Arduino Motor/Stepper/Servo Shield – Part 3: 74HCT595 Shift Register | MCU on Eclipse I got this shield but to drive a stepper motor from a cdrom instead. I have no idea how to power it. To power the shield: the logic is powered from the FRDM board. For the motor power supply there is a separate power connector. Ok, I figured out how to power and drive the shift register, but do you need PWM for the stepper motors? or is it just for servos? I figured it out, I followed your second part of the tutorial on the shift register, and checked out the L293D IC connections, sent the bytes for a simple rotation and it works. I am using an old desktop power supply 5v output to power the motor externally. Its pretty sleek. I was able to use both MSB first and LSB first. Now I am not sure how to make just a single turn, or turn just a few degrees. I checked your tutorial on usig the reset button as an input, that worked too :). Thanks for these tutorials man. Allways at your service! I got a similar motor shield but to drive a DC motor. I have a problem because when I send something to the shield to move a servo, it answers me but if I sent something to move a DC motor, it doesn’t answers. What do you mean with ‘answers me’? You mean it moves? Keep in mind that the signal for a DC motor and for a servo motor are completely different. You say you’re using your own Servo Processor Expert Component. Where can it be found? Thanks. 
Hi AK, its in the component set released on SourceForge, see hi i have this shield and i was wondering would there be a way to hack the shields code for then you could another shield using the analogue pins, as i need 8 motors and i wont to only use one arduino. This all depends on what exactly you want to do. Using the analog pins only will work if you can mux the pins for PWM signals. would you know how to mux the pins for the PWM signals because i looked at the code for the shield (it’s a adafruit motor shield v1) and i only half understand it the simplest and easiest way is if you use Processor Expert: it will do the Muxing for you. hey Erich, i’m trying to run this project using the Arduino motor shield V2.2. the Enable A/B LEDs are on and I checked the other available pins to make sure I didn’t have the PWM linked to the wrong pins but the servos aren’t going through their routine. I’ve tried connecting an external 6V power supply but I’m still not getting anything. What could the problem be? Hi Ryan, can you check both the motor signals and the PWM with a logic analyzer? Erich Hey Erich, thank you for your timely response! I don’t have a logic analyzer readily available, sorry about that. my main goal is to merge concepts from your “accelerating the kl25z freedom board” with my motor shield to create a self stabilizing system using the MMA8451. I am pretty green when it comes to CW and processor expert but I’ve been able to learn through your tutorials. as for the servos I wish I had a logic analyzer so I could tell you if i’m getting a signal to the servos, but alas I don’t have one in my immediate possession. Hi Erich I am a beginner and try to build a robot with this motor shield. My 4 motors run without any problems, but the servo i would like to use for the Ultrasonic sensor acts very strange. 
Connected directly to the arduino it works OK, connected to the motorshield it acts like a continuos servo, it allways turns to the right side and does not go back. I have to stop it, because it would damage the servo. I tried a second motor shield and have the same problem. It would be very nice if you could give me a tipp what I could do. Thank you Franz I am sorry, after days of trying I found out that I uses the pin of the Ultrasonic trigger. My excuse, I am a dynosaur of over 70. Greetings and please delete my question Fran Hi Franz, ah, I see you found it 🙂 And no worries: using the wrong pin happens to me from time to time too. Again: having something to inspect the signal is always the best way to find out what is going on. Erich Hi Franz, hard to tell from remote, but I suggest that you hook up a logic analyzer/oszilloscope to that pin to see how the signal looks like. I hope this helps, Erich Thank you Erich, Regards Franz Erich, good morning. Excellent your tutorial. Please, help me if you can. I need to drive two steppers(NEMA17) with Arduino, ShieldL293D and Joystick to control the directions and speed of the steppers. Do you have code that do this? Thanks a lot. Hi Marco, no, I have not published the code for that. Check the data sheet of the L293D how you can drive the stepper motor. If in doubt, check the many articles on the web how you can use full H-bridges to drive a stepper motor. Using the Joystick is just on top of this. I hope this helps. Hi Erich, how are you doing? I have a question…my motor shield is L293D dual, which can controlk up to 4 DC motors. But I am having problems to make it work using serial communication port. How could I use the L293D only by changing the input levels using HIGH and LOW, instead of serial signal? Thank you and keep it up this good jog! Regards. Hi Erich! I want ask you if you know where find the instructions to use a frdm kl25z and the Adaftruit’s motor shield of this post with DC motors. Thanks! 
Hi Carlos, have a look here: I hope this helps, Erich
https://mcuoneclipse.com/2013/05/30/tutorial-arduino-motorstepperservo-shield-part-1-servos/
CC-MAIN-2017-13
refinedweb
2,695
69.11
Allow running bash shell commands Current implementation of process library has limited usability. CreateProcess record stores CmdSpec field, which is either a RawCommand or ShellCommand. The problem is that: RawCommandcommand quotes and escapes the command parameters ShellCommanddoes no escaping but it runs command in shshell Corollary: there is no way to run bash command with unescaped parameters. As a result there is no way to run this command (and many others): diff <(echo $ENV_FOO) <(echo $ENV_BAR) Running it as a RawCommand (using proc function) fails because command line parameters are escaped and become incorrect. Running it as ShellCommand (using shell function) fails because this is not valid sh syntax. I propose to create function that allows user to run bash commands without escaping the parameters (or even better, run any shell the user wants). In other words this program: import System.Exit import System.Process main :: IO () main = do (_, _, _, pid) <- createProcess (SOME_NEW_FUNCTION "diff" ["<(echo $FOO)", "<(echo $BAR)"] ) { env = Just [("FOO","Foo"),("BAR","Bar")] } ecode <- waitForProcess pid case ecode of ExitSuccess -> putStrLn "All’s right with the world!" ExitFailure _ -> putStrLn ":-(" should produce: 1c1 < Foo --- > Bar
https://gitlab.haskell.org/ghc/ghc/-/issues/8838
CC-MAIN-2020-29
refinedweb
188
53
UNKNOWN Project description hurry.jgrowl Introduction This library packages the jQuery jgrowl plugin for hurry.resource. How to use? You can import hurry.jgrowl from hurry.jgrowl and .need it where you want these resources to be included on a page: from hurry.jgrowl import jgrowl .. in your page or widget rendering code, somewhere .. jgrowl.need() This requires integration between your web framework and hurry.resource, and making sure that the original resources (shipped in the jgrowl-build directory in hurry.jgrowl). CHANGES 1.2.4 (2010-10-07) - Initial public release, packages jgrowl 1.2.4 Download Project details Release history Release notifications Download files Download the file for your platform. If you're not sure which to choose, learn more about installing packages.
https://pypi.org/project/hurry.jgrowl/
CC-MAIN-2019-35
refinedweb
125
62.95
Succeeding With Struts: Dynamically Sized FormsRemember that the HashMap values populated during the setup will disappear as soon as the form displays, because the form is request-scoped rather than session-scoped. What this means in particular to us is that all the Person objects go away. So, if we attached the text field to the comment property of the Person bean, we'd get a null pointer exception when we submitted the form, because the Person object would no longer be in the HashMap (in fact, we will have been given a completely new and empty HashMap.) So, instead, we need to store the comments in a seperate, parallel HashMap which stores them as simple strings. There are a few things to notice in the code above. First, because we're now iterating over HashMap entries, the values available from the c:forEach tag are in fact placeholders for the hash entries, with two properties. The key property is the value used to reference the hash (the strings "1", "2", "3", etc in our case), and the value property which has the value stored under that key. So, in this case, we must use the value property to get at the actual properties of the Person bean. Also, we need to construct a valid Struts property field for the text box. This is done using the JSTL extensions available in the html-el taglib. In this case, we store the comments by a string composed of the last name, a comma, and the first name of the actor. Finally, we need a new action to process the results: package demo;Again, the big difference here is that things are being stored as HashMaps. 
The code gets the keys (lastname,firstname), and displays the keys and comments on the console, i.e.: /** * * A Struts action that sends the new comments to the console */ import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import javax.servlet.ServletException; import javax.servlet.http.*; import org.apache.struts.action.*; import org.apache.struts.validator.DynaValidatorForm; public class ProcessHashFormAction extends Action { public ActionForward execute(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { DynaValidatorForm df = (DynaValidatorForm) form; HashMap hm = (HashMap) df.get("comments"); Iterator it = hm.keySet().iterator(); while (it.hasNext()) { String key = (String) it.next(); String comment = (String) hm.get(key); System.out.println(key + ":" + comment); } return mapping.findForward("success"); } } Fisher,Carrie:LeiaAlso notice that when control is returned to the JSP page, a blank table is printed. This is because the HashMap we created in the setup action is gone now, and we didn't recreate it when processing the results. You could keep that data around in a session variable, but then you're right back to where you were with the first solution. Better to choose a key that lets you get at the backend objects when the form is submitted, and always recreate any other form data needed. Ford,Harrison:Han Hamill,Mark:Luke Which one is best? The array-based solution offers the ability to keep everything in one bean, while the hash-based solution avoids any session-scoped data. Which one works better for you will be the final decision. NOTE: A WAR file containing all the code and libraries needed to run these examples can be found at. About the author James Turner is the Director of Software Development for Benefit Systems, Inc. He is also a contributor to the Apache Struts project. 
He has two books out on web-facing Java technologies, MySQL and JSP Web Applications, and Struts Kick Start. Page 2 of 2
http://www.developer.com/java/ent/article.php/10933_3321521_2/Succeeding-With-Struts-Dynamically-Sized-Forms.htm
CC-MAIN-2014-42
refinedweb
604
64.41
Errata for Learn to Program (2nd edition) The latest version of the book is P-Sep-15) Paper page: 20 Sorry the page number is incorrect - i am reading using the kindle cloud reader. Two serious issues: (1) the author's email address needs to be here so that people can ask questions. (2) start of chapter 2 - in the hello world ruby program. in order for it to run - if you are a linux user - the terminal must be in the same directory as the ruby code. duh! but the author doesn't specify this, and a noob user would know any better and his/her code simply would not run. this is frustrating because we are paying good money for a book so that it lays things out in a easy to read/learn manner. pls take this feedback on board. rgds Ben--Ben - Reported in: P5.0 (04-Jun-15) PDF page: 61 Chapter on Methods does not use the community accepted Ruby Style Guide of encasing parameters in parenthesis. Parenthesis are only left off of methods if there are no arguments. The lack of parenthesis makes the code examples difficult to read with no benefit. Please see the Ruby Style Guide listed on github--Brent - Reported in: P5.0 (10-May-16) PDF page: 64 It seems like the blank lines that are present in the representation of screen output disappear in the epub version of the text. For example, in the pdf is looks like this (dashes represent highlighting) ------------------------------ This puts returned: ------------------------------ but in the epub it looks like: ------------------------------ This puts returned: --------------------------------Sam Gechter - Reported in: P4.0 (03-Mar-14) Paper page: 66 In the method 'ask' the answer variables are declared in the private scope of an 'if' then used later as the return at which point it is inaccessible. I think it should be something like.... def ask question answer # declare the variable to return while true puts question ... 
end answer # return true or false depending on what the user entered end--Jonathan Dade - Reported in: P4.0 (12-Oct-13) PDF page: 87 Text halfway down page reads "Viola!" While this is a recognized English word, it ought to read "Voila!"--Luke - Reported in: P4.0 (26-Oct-13) PDF page: 116 ". However, none of these is actually called until the very last line.)" Shouldn't it be "none of these are"? - Reported in: P5.0 (13-Jan-16) PDF page: 141 The answer ("How I would do it") to the "modern roman numeral" question was "MIM", when it should be "MCMXCIX", which represents the year 1999 as in "puts(roman_numeral(1999)) ". Thank you for your work!
https://pragprog.com/titles/ltp2/errata
CC-MAIN-2016-22
refinedweb
446
72.76
Feedback Getting Started Discussions Site operation discussions Recent Posts (new topic) Departments Courses Research Papers Design Docs Quotations Genealogical Diagrams Archives While monads can be encoded in dynamically-typed language, the usual encodings are usually uglier than monads in type-class-based language because type inference is not there to help determine which monad instance we're talking about. Some use-cases of type inference can be replaced by dynamic type dispatch, but not the so-called "return type polymorphism" where it is the type expected by the context that matters, not the type carried by values. Monads are just one instance where return-type polymorphism matters (in the form of "return : forall a, a -> m a"), but this is a general problem with other structures (typically structures with a default element such as monoids/groups). Intuitively it seems that this problem would be solveable by keeping, in the dynamic type representation, a "not known yet" marker, and using the free monad operations in this case. But there is a gap from intuition from usable code, and Tony Garnock-Jones just wrote a very nice blog post exposing this technique in Racket: Monads in Dynamically-Typed Languages (run-io (do (mdisplay "Enter a number: ") n <- mread all-n <- (return (for/list [(i n)] i)) evens <- (return (do i <- all-n #:guard (even? i) (return i))) (return evens))) class Foo f where ret :: a -> f a comp :: f a -> f b -> Bool foo = let x = ret 10 y = ret 'a' in if comp x y then x else y -- which branch to take? That sounds right. The whole problem seems to involve time at a fairly basic level: we don't know the type "now", we will learn the type "later". So using the trick in circumstances where you need an answer now won't work. (For that reason, (coerce List (return 1)) is needed to make sure things are as expected when calling list primitives.) 
(coerce List (return 1)) An indirect CPS-like encoding of comp, where the program isn't so set on having the result right now, might be able to be made to work though. For example, and this is slightly handwavey, comp :: f a -> f b -> (Bool -> c) -> g c, where g would be some delayed computation structure coercible using the same mechanism that coerces fs, once the right type is known. comp comp :: f a -> f b -> (Bool -> c) -> g c g f Is your problem really different from the ambiguity that arises in: class Print a where Print :: a -> String class Parse a where Parse :: String -> a foo = Print (return (Parse a)) ? I think your code has bugs, but I assume you meant something like this: foo = print . parse This is an example where overloading on return type plainly can't work. You can't let the context figure out the instance if when the context works with any number of instances. My example above is one where overloading on return type works just fine, but isn't amenable to this dynamic encoding (unless you're ok with tricks a la Tony G's comment). I guess I'm just stating the obvious without any insight, but they do seem a little different to me. Shrug. Apparently here was another library with support for return-type polymorphism in Clojure that I didn't know about:
http://lambda-the-ultimate.org/node/5109
CC-MAIN-2018-51
refinedweb
561
59.77
<< patisakeMembers Content count12 Joined Last visited Community Reputation107 Neutral About patisake - RankMember patisake replied to patisake's topic in For BeginnersOkay, but where does he get this number 49? patisake replied to patisake's topic in For Beginnersno, it does the following thing otherwise paste the code and test it.... type eerste liefdes nummer: a (I type a instead of a number and press enter) the result is:type tweede liefdesnummer: 0 * 49 = 0 (that is what it puts on the screen after the enter of the first a nightcreature..are you speaking dutch?spreek je nederlands? patisake posted a topic in For BeginnersHello everybody, I have started with studying C++ and it goes very well since I never have learned a program language. I studied from the e-book juming into C++ which is a very good ebook for beginners. Now I have wrote a piece of code and when I put in the numbers it is going good. But <hen I do a test and as a first number or in dutch type eerste number and then an a or b I get a very strange result..Can somebody explain why he is doing that? So I learn more about behavour of C++ I have wrote the code in orwell devC++ the code is: #include <iostream> using namespace std; int main () { int eerste_liefdes_nummer; int tweede_liefdes_nummer; cout << "type eerste liefdes nummer: "; cin>> eerste_liefdes_nummer; cout << "type tweede liefdes nummer: "; cin>>tweede_liefdes_nummer; cout << eerste_liefdes_nummer << " * " << tweede_liefdes_nummer << " = " << eerste_liefdes_nummer * tweede_liefdes_nummer << endl; cout << "jullie zijn heel erg verliefd"; } Patrick - which website?is it free? - I have the e-book from michael dawson beginning c++ game programming but not the cd due to an ebook..anyone know where I can find the contence of the cd rom? - and what is the difference between ruby program language and python? - I heard that some game engine and animation software for game devolpment use python..is this correct? 
- Are there some good python tutorials for beginning or free ebooks? what are the advantage of python language? - what do you find the best if I want in the future go for 3d games pc and console?? C++ or java? - is this also for beginners stroustrup's book?is there a link for getting it free online? patisake posted a topic in For BeginnersI read on the internet that dev-c++ from bloodshed is not updated and is full of bugs? Is this true and if it is what is the best tool to use instead? patisake posted a topic in For BeginnersWhat do you people think of this book as a beginner? Anyone know a good book to start learning C++? This is the link to the book
https://www.gamedev.net/profile/205037-patisake/?tab=idm
CC-MAIN-2017-30
refinedweb
448
72.66
I think I get Spock Mocks now August 20, 2011 7 Comments I’ve been misunderstanding a fundamental issue of Spock Mocks. That’s annoying, but probably inevitable given that I work with so many state-of-the-art, evolving API’s. If you spend enough time on the bleeding edge, sooner or later you’ll get cut. The problem is, though, I’ve been telling people something wrong for some time now, and that’s not acceptable. Thus this blog post. It’s one thing for me to make a mistake, but it’s quite another for me to mislead others. I want to fix that now. Besides, if I made the mistake, it’s also possible others are missing it, too. I’m a big fan of the Spock testing framework. It’s very easy to learn, it works with both Java and Groovy systems, and it’s got a great mocking framework built into it. I’ve been a JUnit user for years, but I’ve never been able to commit to a mocking framework in Java. That’s partly because I still don’t find them particularly intuitive, and partly because I’m still not sure which one is going to win. I don’t want to commit to a framework (EasyMock? Mockito? PowerMock? etc) only to have to switch to a different one in a couple of years. Spock is fun, though, and I use it whenever I can, and not just for the Star Trek related puns, some of which I’ll have to adopt here. Back in June, I wrote an article for NFJS the Magazine, entitled “Spock: I have been, and always shall be, your friendly testing framework.” I’m going to use an example from that article, with some variations, to show what I recently learned. A basic Spock test Here is part of a Groovy class called Tribble that answers the question, “Do you know what you get when you feed a tribble too much?”: class Tribble { def feed() { def tribbles = [this] 10.times { tribbles << new Tribble() } return tribbles } } The answer, of course, is a whole bunch of hungry little tribbles. The feed method creates an ArrayList of tribbles by starting with the current instance and then adding 10 more. 
I know the return keyword isn’t strictly necessary, since closures automatically return their last evaluated value, but I use it sometimes for clear documentation. Groovy isn’t about writing the shortest code — it’s about writing the simplest, easiest to understand code that gets the job done. To test this method, here’s a Spock test. It extends the spock.lang.Specification class (which is required) and ends in the word “Spec” (which isn’t, but makes for a nice convention): import spock.lang.Specification class TribbleSpec extends Specification { Tribble tribble = new Tribble() def "feed a tribble, get more tribbles"() { when: def result = tribble.feed() then: result.size() == 11 result.each { it instanceof Tribble } } } I never thought JUnit was verbose until I met Spock. For those who haven’t used it much, first let me say you have something fun to look forward to. That said, let me explain the test. Spock tests have a def return type, then have a test name that describes what you’re trying to accomplish. The name is usually a short phrase, but it can be spread over several lines and even contain punctuation. (Hamlet D’Arcy gives a great example in his blog post on Spock mocks, which is also echoed in the cool Spock Web Console. I also agree with him that Spock mocks should be called “smocks”, but since it doesn’t have a direct Star Trek association I’m not sure that will catch on.) As Peter Niederweiser, the creator of the framework, points out, the method name becomes the body of an annotation, but that’s all under the hood. The rest of the test consists of a when and a then block, representing a stimulus/response pair. The when block contains the method invocation, and the then block includes a series of boolean conditions that must be true for the test to pass. Nice and simple. Tribbles do more than just eat, though. They react to others. Like Dr. 
McCoy, I can mock Vulcans Let me add a pair of methods to my Tribble class: String react(Klingon klingon) { klingon.annoy() "wheep! wheep!" } String react(Vulcan vulcan) { vulcan.soothe() "purr, purr" } The overloaded react method is based on a pair of interfaces. Here’s the Vulcan interface: interface Vulcan { def soothe() def decideIfLogical() } Here’s the Klingon interface: interface Klingon { def annoy() def fight() def howlAtDeath() } (Yeah, I know howling at death is a Next Generation thing, but go with it.) Since both Vulcan and Klingon are interfaces, a mocking framework can generate an implementation with just Java’s basic dynamic proxy capabilities, which means I don’t need CGLIB in my classpath. To test the react method that takes a Vulcan, here’s the Spock mocking feature in action: def "reacts well to Vulcans"() { Vulcan spock = Mock() when: String reaction = tribble.react(spock) then: reaction == "purr, purr" 1*spock.soothe() } Spock provides the Mock method to create a mock implementation of the interface. When I then invoke the react method, I check that it returns the proper String and (here’s the cool part), I verify that the soothe method in the mock is invoked exactly once. So far, so good. Klingons react rather badly to tribbles, however, so I thought it would funny if I had them throw an exception. Here’s my original test for the react method that takes a Klingon (warning: this doesn’t do what it looks like it does!): def "reacts badly to Klingons"() { Klingon koloth = Mock() koloth.annoy() >> { throw new Exception() } when: String reaction = tribble.react(koloth) then: 0*koloth.howlAtDeath() 1*koloth.annoy() reaction == "wheep! wheep!" notThrown(Exception) } Using the right-shift operator, my intention was to set the expectation that invoking the annoy method on a Klingon resulted in an exception. 
The plan was:
- In the setup block (above when), declare that the annoy method throws an exception,
- In the then block, verify that the method got called.
The problem is, what happens to the exception? Spock has two great methods for exception handling, called thrown and notThrown. I was able to verify that the method got called, but why did notThrown(Exception) return true? I even got back the string I expected. What's wrong?
The right way to mock a Klingon (from a galaxy far, far away, right?)
Here's the problem: according to the Spock wiki page on Interactions, interactions defined outside a then block (here declaring that annoy throws an exception) are called global, while those defined inside a then block are called local, and local overrides global. Also, interactions without a cardinality are optional, while those with a cardinality are required.
In other words, I may have declared that annoy throws an exception, but in the then block I then changed it to say it actually doesn't. In the then block, I say that annoy must be called once and doesn't return anything. Therefore, no exception is thrown and the return value is as expected.
To achieve what I was actually after, here's the right way to mock my Klingon:
def "reacts badly to Klingons"() {
Klingon koloth = Mock()
1 * koloth.annoy() >> { throw new Exception() }
0 * koloth.howlAtDeath()
when:
String reaction = tribble.react(koloth)
then:
reaction == null
thrown(Exception)
}
Now I set both the cardinality and the behavior outside the then block. The then block verifies both that the exception was thrown, and it checks the cardinality, too. Oh, and while I was at it, I verified that the howlAtDeath method didn't get called. I doubt the Klingons howled at death when they burned down all the tribbles that Scotty beamed into their engine room just before they went to warp.
Hopefully you’ll get it, too, and they’ll be nah tribble at all. (The source code for this example and the others used in my NFJS the Magazine article are in my GitHub repository, . Eventually they will make their way into my book, Making Java Groovy, available now through the Manning Early Access Program.) You can also put both interactions into the then-block. That’s where I usually put required interactions (because they describe an expected outcome). Into the setup-block I mostly put interactions with objects that act as pure stubs (i.e. I just want them to return something and am not interested in verifying anything about them). This is just a convention though, and it’s up to you whether you put an interaction into a setup- or then-block. The behavior is the same except that interactions in a then-block have higher precedence and can thus override other interactions (e.g. from a setup() method). What happens internally is that interactions in a then-block are moved to right before the corresponding when-block by Spock’s AST transform. This is necessary because Spock’s mocking framework (like most Java mocking frameworks but unlike Mockito) needs to know about all expected interactions beforehand. This also explains why you have to give Spock a little hint if you factor out an interaction from a then-block into a separate method (see Specification#interaction). Without this hint, Spock wouldn’t know that it has to move the code. What I find confusing is that you move code from the then (check if the test ran ok) block to the test (setup and run the test) block. You either do different tests now, and no longer test if methods are called once or never, or I’m declaring a state of severe confusion. What’s changed is the point in time when the interaction is declared, not the point in time when it’s verified. In the vast majority of cases, this won’t affect the outcome of the test. If you don’t want any magic to happen, declare all interactions up-front. 
Pingback: Thoughts about Spock (the test framework) « career over Thank You…i found you post very useful Thanks for posting. This was extremely helpful from a number of angles. Pingback: Groovy Spock und Mocking | Digitales Umfeld
https://kousenit.org/2011/08/20/i-think-i-get-spock-mocks-now/
CC-MAIN-2017-17
refinedweb
1,740
71.75
import fees does anybody know how much the import fees are to ship to Canada? Who cares you need a OW in your life!! Lol @crc I think it's about $2400 all in ... there was a member from Saskatchewan that posted how much. .. i ended up shipping to the US and picking it up. Almost a 1k$ in fees .. I don't need a Onewheel that bad.. @njcustom unfortunately in the last 6 months the Canadian dollar tanked about 25%... so $1500 USD = $2000 CAD ignoring the additional customs etc... actually as I type this likely $2400 won't do it anymore... probably gonna be more like $2700 CAD... @rainynite that's crazy. Again I don't need one that bad lol.. Might be cheaper to fly to California and pick one up The shipping to Canada is US$300, which is like CA$400 right now. This seems like a ton, but I just emailed them to get confirmation if this includes customs brokerage and taxes. In BC here I'm going to have to pay 15% PST+GST anyway, so that's US$224.85 on US$1499. If they do include the taxes, then the shipping component is only $75-ish, which is okay. If they don't cover the taxes, then I'll take the cheaper inside-US shipping option, and get it delivered to a mail service over the border in Washington state, and go pick it up and pay the taxes when I bring it home. I'm planning on CA$2400 all in. An update to my last post... FW confirmed that the US$300 does not include taxes and duties. So if you can get it shipped to a US address and go pick it up, you could save a few hundred bucks. Apparently these guys have OWs in stock in Canada. A friend was at the boat show in Vancouver the other day and saw they had a pile of them. I just called them and they basically charge the same all in costs as getting it shipped, but of course the benefit is that it's close to Vancouver and therefore don't need to wait 12 weeks. Thinking of getting one for my son. .. just called them and bought one. They said they have quite a few in stock and are just trying to get the word out. 
I got mine @ Western Canoe Kayak ;) I hear they will even ship for free most places in Canada. Get some One Wheel in your life! Sounds like we have a contingent of Vancouverites... Once the weather gets better anyone up for a meetup? @rainynite giddy up. I am going to pick mine up tomorrow. Cant wait. @jcmaros awesome. Take some time to practice and soon you'll be flying. It was raining when I got mine so I practiced doing figure-8s in a school covered area. Just don't forget the protective gear. Saved my head and wrists so far. This is awesome you Vancouver guys can just go and get one. I think I'm the only one in Victoria. I had to wait 10 weeks to get mine. I'd like to meet up some time on at trip to Van. It's not as good riding in the rain eh? I do it in gumboots, but don't get the same foot-board connection feeling. Also, the gravel and grit piles up under my front foot. I don't have a fender, but I think it would tend to clog up with grit. @rainynite Funny you say that because that is exactly what I did. It was awesome
https://community.onewheel.com/topic/647/import-fees/4
CC-MAIN-2021-21
refinedweb
612
93.24
Search Create Pega 7 training STUDY Flashcards Learn Write Spell Test PLAY Match Gravity Terms in this set (86) access group An access group determines: The portal layout that a user sees first after logging in. The local customization RuleSet name and RuleSet version. These usually are defaulted when this user creates a new rule instance. The application rule for this user. Optionally, the access roles available to this user. class group A class group is an instance of the Data-Admin-DB-ClassGroup class. A class group instance causes the system to store the instances corresponding to two or more concrete classes that share a common key format in a single database table. The name of the class group is a prefix of the names of the member classes. Class groups are commonly used to cause the system to store instances of similar or related work item concrete classes together in one relational database table. This is known as a work pool. Work pools are referenced in access group instances. Class groups correspond to tables in the PegaRULES database, not tables for external classes. Each external class has a corresponding unique database table instance (Data-Admin-DB-Table class) and so cannot be part of a class group. The Clone a Class Group landing page tab, on the Data Model — Classes and Properties landing page, lets you duplicate the structure of a class group.
If left blank, that operator can enter work items but not access a worklist or workbaskets. Work group information is not related to the three-level organizational hierarchy consisting of unit, division, and organization. Work group membership does not affect a user's RuleSet list. Stages Stages describe the phases of a case transaction. Use nouns or gerunds (verbs ending in "ing") as a convention. For example, an auto loan case might have the following stages: submission, review, underwriting, and financing. Avoid using verbs that describe an action or a status: order placed, shipped, account paid, and closed. Steps Steps represent a process, an assignment, or another case. Steps are actionable "things to do". The name should describe the step's overall purpose. Use a verb plus a noun convention. For example, an auto loan case might have a step named "Submit Loan Application". Case Types Case types represent business transactions (cases). For example, a lending application might have auto loan, personal loan, home equity loan transactions. In other words, a case represents what must be processed, not how it is processed, which is described by stages and steps. Use a noun convention. Avoid using vague, non-specific names such as Auto or Personal, which do not provide enough context. For instance, in a financial services organization, there may be Auto Loan cases and Auto Accident cases. Data Object Types Data object types are classes that contain the information such as properties and data pages that are necessary for a case to accomplish its actions or achieve its business outcome. In other words, they represent "things." Use a noun convention; for example, Cost Center, Country, and Exchange Rate. Flow Actions Use a verb and noun convention for flow actions, for example, Approve Correspondence. A flow action can reference other records. 
When associated with certain activities, data transforms and validation records, prefix the flow action name with: Pre — if used with a pre-activity or data transform; for example, PreApproveCorrespondence Post — if used with a post-activity or data transform; for example, PostApproveCorrespondence Val — if used with a validate record; for example, ValApproveCorrespondence When naming other records related to the flow action, follow these guidelines: Section - use the same name as the flow action Privilege - use the same name as the flow action The Feedback feature The Feedback feature needs to be enabled for the application on the DesignerStudio > System > Settings > Project Management landing page. The access role PegaRULES:Feedback needs to be added to the access groups of operators that should be able to send feedback. A case of class Pega-Feedback is created for each feedback submitted and routes it to the workbasket called Feedback. The Feedback workbasket can also be accessed from the Show Feedback button next to the Pin button in the portal. Custom Warnings To add or modify rule warnings, override the empty activity called CheckForCustomWarnings in the baseclass. This activity is called as part of the StandardValidate activity which is called by, for example, Save and Save-As and is designed to allow us to add warnings. ABV branched ruleset An ABV branched ruleset has access to all rulesets in the application including other rulesets in the same branch as well as any other branches also defined in the same application definition. Hence, all branches within a given application can refer to each other. identify any invalid rules in the application. The tool can be started by selecting DesignerStudio > Application >Tools > Validation. Utilize the Validation tool after critical changes/milestones, such as changes to the application ruleset list or built-on application and before lock/export. 
Copy Merge Ruleset tool With this utility we can: Rename a ruleset Change a ruleset version number Merge several rulesets into one Merge ruleset versions into one version The main difference between copy and merge is that copy always leaves source ruleset versions unaltered. Merge provides us with the option to delete the source ruleset versions. Merging also provides us with the option to modify non-versioned rules, which copy doesn't. Skimming Skimming is the process of collecting the highest version of every rule in the ruleset version and saving it into a new, higher ruleset version. As a result, it applies mainly to rule-resolved rules. In addition, there are exceptions to what gets skimmed based on the availability of the Rule as well as the type of skim we're performing. That leads us to the types of skims: there are two, Major and Minor. This is in concurrence with the ruleset version format (Major-Minor-Patch). During a minor skim, rules are rolled into the higher minor version and during a major skim, rules are rolled into the higher major version. Skimming is triggered from the Designer Studio by clicking System > Refactor > Rulesets. Amongst other ruleset utilities locate and click the Skim a RuleSet link. The "Skim a RuleSet" dialog offers choices from which we can select whether it is a major or minor version. During skimming, the availability field also plays a key role. The following table summarizes which rules get moved during a major and minor skim.

|            | Available | Not Available | Final | Withdrawn | Blocked |
|------------|-----------|---------------|-------|-----------|---------|
| Major Skim | Yes       | No            | Yes   | No        | Yes     |
| Minor Skim | Yes       | No            | Yes   | Yes       | Yes     |

Skimming does not delete any rules. Skimming copies but does not update or delete rules in the source versions. After skimming, use the refactoring tool to delete other ruleset versions. It is also a good idea to move only the skimmed version to production. Skimming does not validate the copied rules, nor compile any Java.
For rules of rule types that produce compiled Java, compilation occurs when the rule is first assembled and executed. Preparing for skimming 1. Make sure there are no checked out rules on the ruleset versions that are being skimmed. 2. Lock the ruleset versions. 3. Run the Revalidate and Save tool before the skimming operation. This is accessed by clicking System > Release > Upgrade > Validate. Same-case Parallelism Same-case parallelism is when multiple assignments, associated with the same case, are created, and each assignment exists within a "child" or "sub" process that is different from the "parent" process. When a Split-For-Each shape is used, the same sub-process is specified. The following options exist for when the parent flow is allowed to continue based on the sub-process completion: All Any Some Iterate With all four options it is possible to specify a When rule to decide whether or not to create the sub- process when the Page List or Group is iterated. The "All" and "Any" rejoin options are straightforward. The "Iterate" rejoin option offers an "Exit Iteration when" condition whereas the "Some" option provides both "Exit iteration on when" and "Exit iteration on count" rejoin options. The "Iterate" option also differs in that sub-processes are not created in parallel. Instead sub-processes are created sequentially until either the "Exit Iteration when" condition returns "true" or the last sub-process has completed. When a Split-Join shape is used, each parallel Assignment is created within a sub-process that can be and typically is different from each of its siblings. The following options exist when the parent flow is allowed to continue based on sub-process completion: All Any Some Note that when using a Split-Join, the number of sub-processes that are created is decided at design time. With Split-For-Each, however, the number of sub-processes that are created is based on the size of the specified Page List or Page Group. 
A third type of Business Parallel processing is called a "Spin-off". A Spin-Off flow is configured by checking the "Spin off" checkbox within the Subprocess shape. A Spin-off flow differs from the Split-For-Each and Split-Join flows in that the parent flow is not obligated to wait for the shape to complete. Subcase Parallelism Subcase instantiation alternatives are discussed elsewhere in this course. The primary difference between subcase parallelism and same-case parallelism is the fact that one or more additional cases are created. When subcase parallelism is used, the following options exist if we want the parent flow to wait for the spun-off child cases to complete. We can: 1. Route to a Workbasket Assignment with no Flow Actions. 2. Route to a Worklist Assignment with a single Flow Action that wraps back on that Assignment. 3. Use a Wait Shape. For options one and two (above) to continue, a Ticket, typically AllCoveredResolved, must be associated with a different shape usually within the same flow. A Workbasket name must be specified when using the Wait Shape option; deferred@pega.com being the default value, hence it is somewhat similar to option one. The "Case Dependency" option within a Wait Shape supports "Any" and "All" join conditions. The scope of this join condition is a single subcase type having reached a certain status. Note that the AllCoveredResolved Ticket approach is similar to the "All" option with the exception of supporting multiple subcase types while requiring that each subcase achieve a "Resolved" status. Pros and Cons of Same-Case vs Subcase Parallelism Capabilities only available when using subcases Dependency Management: A subcase can be configured to launch based on its parent's state as well as the states of one or more sibling cases. No such capability exists with same-case parallelism. Data Propagation: The amount of data made available to a child case can be restricted.
However, that propagated data can become stale since it is a copy. Within same-case parallelism, data propagation does not exist. Ad-hoc Processing: A subcase can be used to perform ad-hoc processing at run-time. There is no concept of ad-hoc processing with regard to same-case parallelism. Case Designer-mediated Specialization: Cases can be circumstanced using the Case Designer. Though it is possible to use circumstanced flows during same-case parallelism, the capability to circumstance flows is not supported by the Case Designer. Class Specialization: Being a work type, subcases can utilize class specialization unlike flows used in same-case parallelism. Advantages of using subcases over same-case parallelism Security: Subcases offer more control over security. The need may arise for certain work to only be allowed to be performed and viewed by persons with different or greater privileges than the originator. In rare situations the need may also exist for the individuals who are performing a certain type of work to not be allowed to know who originated it. This type of security is more difficult to control with same-case parallelism. Note that work can be "pushed" to different persons using either approach provided those persons are known to possess the requisite roles and privileges to perform the work. Also, with either approach, spun-off work can be routed to a Workbasket that enforces that each requestor has the proper Access Role. Parallelism / Locking Strategy: A spun-off subcase can be made immune from its parent's locking strategy, for example, by overriding DetermineLockString. With either approach, Optimistic Locking can be used. Reporting Granularity: When requirements exist to measure/monitor the efficiency/quality of work completion at a fine-grained level, yet greater than a single Assignment, the subcase approach is superior.
Async Comm: A spun-off subcase that is immune from its parent's locking strategy can call the Queue-For-Agent to invoke an Activity that invokes a Connector. A Standard Agent Batch Requestor can then attempt and re-attempt the connection in parallel to the parent case with no concern whether the parent case is locked. With the same-case approach, the Standard Agent must wait for the case lock to be released. Advantages of using same-case parallelism over subcases Attachment View-By-Child Capability: A parent case can view all child case attachments. However, extra logic is required to avoid Data-WorkAttach- duplication should a child case need to see a parent case attachment. With same-case parallelism, every child process can view every attachment. Reporting Simplicity: Because data is captured within the same case when using same-case parallelism, no need exists for a report to join to the subcase when reporting at the parent case level. Alternatively the subcase would need to update its parent to facilitate reporting. To some extent, the Case Designer-mediated Calculation mechanism can be used to reduce this complexity. Policy Override: It is more complex to manage "Suspend work" when multiple cases are involved, hence same-case has an advantage in this respect. Process Simplicity: When the actions required from different users take very little time to complete, and temporary locking is a non-issue, OOTB solutions such as PartyMajorApproval can be used that are much simpler than implementing the same functionality using subcases. Subcase Parallelism When the subcase approach is used, the question of what data the subcase is allowed to "see" arises. With the same-case approach, all data is visible.
When the action is submitted, the standard Simple Case flow (pySimpleCaseWorkFlow) instantiates an ad hoc case called Simple Case of class group Work-Cover-SimpleCase. The Ad Hoc Case Dashboard action references the section pyAdhocCaseActions, which we can extend. The Add Tasks icon launches a local flow action pyCreateAdHocTasks in which users add tasks to the ad hoc case in the pyAdHocProcessesSequential section (also extensible). Each item is processed in sequence, starting from the task on the top row. The first deadline defaults to one business day from when the task was entered. The second task's default deadline is one day from the first task's default, and so on. The tasks are processed in the list order regardless of the deadline dates the user enters. A task is processed as a Complete Task assignment in the standard Complete Task flow (Data- AdHocTask.WorkTask). The More Tasks Split-For-Each shape iterates across the Tasks page until all tasks on the grid are completed. The Ad Hoc Case Dashboard assignment remains open until the user manually resolves the ad hoc case. Users can add top-level ad hoc cases in Pega Pulse posts by clicking Actions and selecting the Create a Task option. The option launches the pyCreateAdhocCaseFromPulse flow action. When submitted, a link to the case displays on the post. To create a case type from an ad hoc case Users have the pyCaseInstitutionalize privilege (included in the standard pyPega- ProcessEngine:CaseDesigner role). The current application contains an unlocked ruleset version. Process API Pega provides a collection of standard rules which can be directly reused or overridden in your application for various operations. There is a collection of standard rules provided which can be directly reused or overridden in our application for various operations. 
These standard rules include mainly activities but also a few flows that can be used to create cases, advance the cases through a flow, perform assignments and so on without user interaction or user forms. This subset of the standard rules is known as the Process API. Engine API The Engine API is a java based API which offers a collection of interfaces. Some rule types, such as functions, edit input, edit validate, and activities allow us to leverage the Engine API by directly entering Java source code into the rule forms. However, always keep in mind that we use Java source code only when necessary. Typically, we would use the keyword "tools" to identify the PublicAPI interface and access its functionality. For example, tools.findPage("myPage"), allows us to access the findPage method of the PublicAPI interface which returns a ClipboardPage object referencing a page in the current Thread named "myPage" or null if there is no such page in the context of the current Thread. Updating Production Flows Here are some examples of updates that might cause problem flows: moving: pxTaskName — the shape ID of the assignment shape to which it is linked pxTaskLabel — the developer-entered text label of the assignment shape. pyInterestPageClass — the class of the flow rule pyFlowType — the name of the flow rule The pzInsKey of the flow rule, which uniquely identifies the rule, is not stored on the object. Changing or removing the related shapes or flows will likely cause a problem. Removing: Approach 1: Revert the user's ruleset to the original, lower versions To allow users to process existing assignments, add a new access group that points to the old application. Then add the access group to the operator ID so that the operator can switch to the application from the user portal. Advantage: This is the only sure approach when changes between versions go beyond just the flow rules. 
Drawback: There may be unintended consequences, where desirable fixes in the higher ruleset version aren't executed because the user's ruleset list is too low to include them. Approach 2: Process existing assignments in parallel with the new flow This approach keeps the placeholder shapes (Assignment, Wait, Subprocess, Split-For-Each, and so on) that we are changing or deleting in the new flow. Reconfigure the new flow so that new cases never reach the old shapes, but existing assignments still follow the original path. Advantage: All cases use the same rule names across multiple versions. Drawbacks: This approach may not be feasible given configuration changes. In addition, it may result in cluttered Process Modeler diagrams. Approach 3: Add tickets to control processing of existing assignments In this approach, tickets are used in the newly modified flows to control where the processing of each type of old assignment is to resume processing. Run a bulk processing job that finds all the outdated assignments in the system. For each assignment, bulk processing should call Assign-.OpenAndLockWork, and then call Work-.SetTicket on the work page. Advantage: Leaves the flow rules clean. Drawbacks: It might be impractical if the number of assignments is large, or if there is no moment when the background processing is guaranteed to acquire the necessary locks. As we reconfigure and test our flows, identify and manage problem flows on the Flow Errors landing page by going to Designer Studio > Process & Rules > Processes > Flow Errors. This report lists flow errors that are routed to our worklist or work group in our current application by a getProblemFlowOperator activity. Each row identifies one flow problem. Rows may reflect a common condition or unrelated conditions from multiple applications. Use the following features to fix problem flows: Resume Flow if we want to resume flow execution beginning at the step after the step that paused. 
Retry Last Step to resume flow execution, but begin by re-executing the step that paused. Restart Flow to start the flow at the initial step. Delete Orphan Assignments to delete assignments for which the work item cannot be found. Remember: Always test updated flow rules with existing work objects, not only newly created ones. When an operator completes an assignment and a problem arises with the flow, the primary flow execution is paused and a standard problem flow "takes over" for service by an administrator who determines how the flow is resolved. Pega 7 provides two standard problem flows: FlowProblem for general process configuration issues as described previously, and pzStageProblems for stage configuration issues. Important: As a best practice, override the default workbasket or problem operator settings in the getProblemFlowOperator routing activity in our application to meet our requirements. Problems due to stage configuration changes, such as when a stage or a step within a stage is removed or relocated within the Stages & Processes diagram. When an assignment is unable to process due to a stage-related issue, the system starts the standard pzStageProblems flow. The form displays an error message and the problem flow assignment. To resolve, the problem operator selects the Actions menu and either cancels the assignment or advances it to another stage. As a best practice, do not remove stages when updating your designs. Consider keeping the stage and its steps as they are. Use a Skip stage when conditions in the old stage's Stage Configuration dialog prevent new assignments from reaching the stage. Locking The "Allow Locking" checkbox on a class group definition determines whether locking is enabled for every class that belongs to the class group as well as the "work pool" class itself. This is the only way to enable locking for cases. 
For any concrete class, which does not belong to a class group, its own configuration setting "Allow Locking" defines, whether the system will be locking open instances of the class when the lock is requested. By default "Allow Locking" is not checked for those classes. if we create a Data Type first and do not create records for it, the underlying data class is configured by default and. locking is not allowed. However if we define a key property for the type under the Records tab, the system automatically reconfigures the data class to include the key property added and to allow locking. Pega only issues locks on instances initially saved and committed to the database. So, prior to requesting a lock, make sure the object is not new but has been saved and committed. The last condition that must also be met is the requestor's access role should convey the privilege needed to perform the operation for which the lock is being requested. Once all these configurations are properly defined, the system is able to issue a lock for an object when requested. Locks are commonly requested from activities using one of the three methods: Obj-Open, Obj- Open-by-Handle and Obj-Refresh-and-Lock. The first two methods must have their "Lock" parameter checked in order for the system to issue the lock. Beside these activity methods, Pega also provides a standard activity Work-.WorkLock to be used to request a lock on a work item. Pega implemented the locks as instances of the System-Locks class and persist them into the pr_sys_locks table in the database. A lock is exclusive to one Pega Thread and operates system-wide in a multi-node system. When a requestor obtains a lock on an object through one Pega Thread and attempts to access the same object through a different Pega Thread, the system presents the "Release Lock" button for the second Thread. The requestor must click on this button to release the lock from the previous Thread before another lock can be issued for the new Thread. 
Once issued, a lock is held until it is released. And a lock can be released in different ways. A commit operation typically releases the lock automatically if the method used to acquire the lock has specified to release the lock on commit. The "ReleaseOncommit" check box of the methods must be checked. This field should always be enabled when opening an object with a lock unless there is a special requirement where we need the lock even after commit. A lock is also released by the system when the requestor who owns the lock explicitly logs out but not when the requestor terminates the session by closing the Window. The system also automatically expires locks after a preconfigured timeout period. By default, the Data- Admin-System data instance sets the lock timeout to 30 minutes which can be modified. An expired lock also called a "soft" lock remains held by the requestor until the requestor releases it or until the requestor session ends. However, once the lock is soft, it can be acquired by another requestor who requests it. A requestor can only release locks held by its own session. In v6.3 and beyond,) which communicates through the system pulse across all nodes. Pega also provides the "Page-Unlock" activity method to release a lock on an object. In fact, Pega uses the properties listed to build the lock string. For a case not associated with a class group, the class name concatenated with the properties listed in the "Keys" area of the "General" tab are used together to build the lock string. In some rare situations when a non-standard lock is needed for the class, the list of properties to use for the lock string is provided on the "Locking" tab of the class rule. If the instance to be locked is within the scope of a class group, the system uses the standard activity Work-.DetermineLockString to determine the lock string or lock handle. 
As defined by this activity, the lock string is either the pzInsKey property value of the object or its cover's pzInsKey property value. The logic which defines the value for the lock string relies upon the setting "Do not lock the parent case" for the case type in Case Designer. It is a best practice to use this default implementation of DetermineLockString activity. For example, if we have a work item with a cover item and it is locked, all other work items covered by the same cover item are automatically locked since they all share the same lock string which is the pzInsKey property value of their shared cover. In fact, if the cover itself does not have a cover, then its lock string is simply its own pzInsKey value. Now, remember that is the same value used as a lock string for the covered item. So locking the covered item automatically locks the cover itself as the system is not able to build the same lock string again as long as that first lock is properly held. This behavior is quite often used in Case Management where a case may be defined with one or multiple subcases. In the parent-child relationship, it may be critical in some circumstances to prevent any update to the parent case while its subcase is being updated. However, other circumstances may require that the locking bound between parent and child case be broken in some way, not covered by case type locking configuration. In such a case, we need to specialize DetermineLockString activity. So we specialize it for a particular class in a relevant ruleset and replace the "PropertiesValue" field for pxLockHandle by anything we deem appropriate for our application. Data Model In a general sense polymorphism makes the application more flexible and easier to change. Polymorphism often leads to more reuse which in term leads to faster development and less errors. Another benefit is that when properly used polymorphism can improve the readability of your rules. 
A reference property is a type of property that acts like a pointer to another property or page. Another way to think about it is as an alias for a property. To make a property a reference property we need to go to the Advanced tab and simply click the checkbox. Reference properties are most commonly used to link related pages within a work object. They can be used to link other top level pages but this requires special care as the developer is responsible for making sure the page is available on the clipboard when the reference property is referenced. At runtime, using the Property-Ref activity method the PrimaryDriver page can be linked to the applicable driver page in the DriversOnPolicy page list property. This allows us to establish a relationship without copying any data. The Property-Ref method is pretty simple. On the left we list the reference property and on the right the page or property we wish to map to. We are able to refer to these properties using the same syntax as if this was a regular property. Once linked the references are maintained until the link is explicitly broken or changed using the Property-Ref method. Property references cannot be circular. In summary, reference properties are not commonly needed. However, in more advanced data structures that require the linking of various embedded entities they can be very powerful. They can help improve runtime performance and make design time easier as well by making property references simpler and more intuitive. Data Pages SOR Pattern The System of Record (SOR) pattern describes a situation where our case needs to access data related to a case that is stored in another system or application. In most situations the case doesn't own the referenced object but rather may display data for context or use data in rules. For example, a loan application or a credit card dispute may need to access the customer's account information and history. 
Another common trait of this pattern is that the case needs to have access to the most current data. For example, if the account holder's phone number changes we want that to be reflected when the data is accessed from the case. Usually, the data loaded comes from an external data source. Let's have a look at how we can implement this pattern in a claims application for the customer account information. We start with the D_Customer data page, which represents our customer data. The data is loaded from a SOAP connector and the customer ID is passed in as a parameter to the data page. Snapshot Pattern In the snapshot pattern the case does not point to a data page but instead the data from the data page is copied into the case when the data is accessed. Once the data is copied into the case the data page is not accessed on subsequent property references. This pattern is especially useful when the data needs to reflect a specific point in time. For example, an insurance claim may want a copy of the policy data as it exists when the claim is filed. If the policy changes AFTER the claim we DON'T want it updated. This is the opposite of the SOR pattern we discussed earlier. However, if the parameters used by the data page change, the data is copied into the case again. In our claims application we configure the policy property to copy data from data page. Since the data is stored in the case it is persisted into the database with the case, making it available for reporting. Reference Pattern The next pattern we'll look at is one of the most common and simplest patterns. We call it the reference data pattern. In this pattern we need to reference a list of data that is usually not directly connected to a given case. This could be a list of products, or countries, or perhaps a list of valid values for a drop down. In many cases the same list can be used by other cases or even other applications. In many cases the list is used to populate UI controls. 
One permutation of this pattern is where the list needs to be filtered based on the selection of a previous value. For example a list of cities may be populated based on a selected country. Let's look at the configuration of using two data pages to implement these types of cascading selects now. The first data page with the country list is loaded via a report definition from the local data storage. Since this list can be shared by all users we can make it a node level page to improve performance. Also, since this list is not based on any input it does not require any parameters. Keyed Access Pattern The keyed access pattern is not as common as the previous patterns but when appropriately applied this pattern can significantly improve an applications performance and maintainability. The primary aspect of this pattern is that one data page can be utilized as both a list and a single page. All of the data is loaded into a single list data page during the initial load and then can subsequently be accessed as a single page via an auto-populating property. This serves as alternative to having two separate data pages. This makes management simpler and can also improve performance. This pattern can be useful when the entire dataset we are working with can be loaded in a single service call and stored efficiently. It is also useful in cases where users may need to frequently switch back and forth between pages in the list. Preload a Data Page One of the primary benefits of data pages is that they automatically manage the loading of data, taking that responsibility away from the consumer of the data page. Sometimes the data pages takes time to load which may negatively impact the customer experience. In such situations, we may want to proactively load the data before it is actually needed. 
For example, when a customer contacts a customer service representative it is highly likely that the customer's account and activity information will be needed to properly service the customer which may take time to load and aggregate since it often resides in multiple external systems. Rather than waiting to load the data until it is actually needed, we could load it while the representative takes a few moments to verify the customer's identity and determine the intention of the call. To accomplish this from a technical perspective, an explicit call is made to the necessary data page in an activity using the method Load-DataPage, which causes the data to load. If at any point, we require the data to finish loading before proceeding, we can use the Connect-Wait method to force the system to wait for a desired period of time before proceeding or return a fail status if it does not complete in a timely manner. Both the Load-DataPage and the Connect-Wait methods have a PoolID parameter which makes it possible to pair a Load-DataPage method with the Connect-Wait method by matching their PoolIDs. Before using these methods, be sure to understand the performance gain to ensure it outweighs the cost of loading these pages procedurally and thus, maybe sometimes unnecessarily. Configure Error Handling for Data Pages Data Page errors are treated as any top-level page errors. A message on the property stops flow processing if it is visible on the client. A page message on the other hand does not stop flow processing. If the Data Page is referenced to auto-populate a property then both page and property messages propagate from the Data Page and blocks flow processing from moving forward. Use the post load processing activity on data pages to handle errors. The ConnectionProblems flow defined on the connector never gets invoked by a data page because the data pages catch all exceptions and add page messages so that error handling can be done in the post-activity. 
First check for technical errors at the data layer and handle them if possible so that the messages can be cleared. Leave unhandled errors on the page so that it can be handled at the work layer. Remember to set the message on a case property visible on the client to block the flow if auto-populate is not used. Triggers and On Change Both rules allow us to execute activities in response to events in the system. Both rules allow for monitoring property changes that are to be part of the tracked events. And both run on the application server. This is important as Declare Triggers are sometimes confused with Database triggers which run on the database. Declare OnChange rules are sometimes confused with OnChange JavaScript events which run on the browser. Triggers and OnChange rules differ in some significant ways as well. Triggers are associated with persistence related events, for example, when objects are saved, deleted or committed. Triggers can execute their activities asynchronously, as well as track the previous values of properties. These features are all unique to triggers. OnChange rules on the other hand, are fired purely based on changes to the clipboard. No persistence of the object is required. This makes them especially useful in pure business rules engine applications which often cannot rely on persistence events. Finally, OnChange rules help drive a unique BPM feature, Policy Overrides. Policy Overrides allow for the dynamic and declarative override of a flow, based on changes to the data on the clipboard. This is covered in more detail in a separate lesson. Trigger and OnChange rules both help to solve some common business requirements. For example, one of the more common requirements is property auditing; where we need to track and sometimes take action when critical properties are changed. Or perhaps users need to be notified when a property goes over a certain threshold. Another common use case is when integrating with systems of record. 
We can utilize triggers to synchronize data with an external system of record. In applications with complex calculations OnChange rules can be used to execute a calculation activity when values change. Most of the use cases we just discussed can be implemented without these rules in a more procedural way. However there are some key benefits to using declarative rules in this manner. Since these rules are declarative they are executed by Pega 7 reducing the chance that a developer forgets to call them. This is particularly helpful in creating applications that are built for change as we can define the policy and let Pega 7 enforce them at an engine level. This leads to an application that is easier to maintain and debug. Let's take a look at how a trigger rule can be used to track a specific property. This is also known as Field Level Audit Pattern and this can be created automatically using the Field Level auditing landing page (accessed by clicking Process Management > Work Management > Field Level auditing). The Field Level Audit gadget creates a trigger and a data transform rule. The trigger rule named pyTrackedSecuityChanges is created in the appropriate class. Now let's talk about all the configurations we need to create if we are going to create a trigger rule that performs other tasks than tracking properties. In the trigger rule, we have other choices for when the trigger rule gets executed. Let's look at the rest of these choices: Deleted — executes the trigger whenever the instance that belongs to the Applies to class or a descendent of that class, is deleted using Obj-Delete. Committed Save — gets executed when the saves are committed to the database. Committed Delete — gets executed when the deletes are committed to the database. Saved and — executes when an applicable object is saved using Obj-Save AND one of the listed properties has been modified since the last save. 
Note: Since Pega 7 normally defers the committing of saved and deleted objects, these two events can occur at different times in the process. Activities called by a trigger rule should be of type 'Trigger'. This is set in the Security tab on the activity rule. A trigger activity can be run immediately or in the background. When running in the background the primary page is copied to a separate child requestor and run asynchronously. While this can be useful in specific situations it is generally not advised as troubleshooting activities run asynchronously can be challenging. Triggers also allow us to specify a page context. This page context allows a trigger to run for each page in an embedded page list. For example we can specify SelectedCourses and the appropriate class (SAE- HRServices-Data-Course). As shown, the trigger activity runs for each page in the line item list. Note that while the activity is run for each page in the list the Applies To class of the activity is still expected to be that of the trigger rule (SAE-HRServices-Work) and NOT the page context. In practice the use of Page Context on triggers is rarely implemented. Pega 7 creates a clipboard page named pyDeclarativeContext that is available during the life of the trigger activity. This page is of type Code-Pega-DeclarativeContext and has a value list of the changed properties. In some cases it may be useful to programmatically examine this page to see which properties caused the trigger to execute. OnChange rules execute based on a property instead of an event that occurs on the database. In an OnChange rule we can add multiple properties and when multiple properties are listed a change to any one property causes the action logic to fire. To determine which property or properties changed we can examine the pyDeclarativeContext page as previously discussed. The conditions section allows us to define a when rule as well as the action. 
There are two actions allowed, Calling an activity and Suspending Work, which is also known as Policy Overrides. Policy Overrides are a unique feature of OnChange rules and allow us to declaratively alter the processing of work. This lesson does not discuss Policy Overrides in detail. If we select the Call activity action, we can specify an activity based on whether the when rule returns true or false. If no when rule is specified the "when true" activity runs. The Security tab of this activity is set to type OnChange. OnChange rules, unlike triggers, execute an activity based on changes to the properties on the clipboard and not the database or persistence events. These changes are tracked using standard forward chaining logic. Activities of type OnChange do not fire other forward chaining declarative rules, such as expressions, during the activity. The forward chaining rules are executed after the OnChange rule completes. This avoids any infinite loops. Like triggers, OnChange rules can specify a page context so that the rule applies to all elements in a list. However, unlike triggers when using a page context the activity called is expected to be of the page context's class not the Applies To class of the rule itself. Custom Functions Functions are rule types written to extend the capability of the Pega 7 system and like other rules in the system they belong to a ruleset and can be versioned. However they do not apply to a class and cannot be circumstanced. Functions provide an extremely powerful way for us to implement custom functionality. Though it is very powerful it also has a downside as we can easily erode many of the advantages of a Pega 7 based application if functions are overused. Functions, as compared to model driven rules, are not Built for Change® and can be challenging to debug. 
With that said utility functions are preferred over adding java steps in an activity since they clearly isolate the java code in a separate rule and can often be made more reusable. Before creating custom functions we need to create a library where this function can be grouped. The library can optionally include all Java Packages used in the functions belonging to this library. We can also access the PublicAPI interface part of the Engine API in the function rule, the system recommends using the keyword tools to access the PublicAPI interface. Collecions. The Pre/Post Actions tab allows you to perform an action before and after the rules in the collection run. These actions may simply be to record the start and completion of the Collection execution. A typical use of the "Before This Collection" section is to initialize a return value from the Collection. Why not just use an Activity rule to do these types of evaluation? Many of the same features are available in the Activity rule. However, the Collection rule has been designed for the rule engine processing and offers these benefits: The Collection form itself makes it easy for the business to maintain, along with the decision and declarative rules that are part of the Collection Function aliases can be used to make form easier to read and define. Response actions implement common patterns without activities. Improved performance over old collection rules and flows for rule orchestration. Collections can also call other collections. In a strictly rules engine based environment where there is no user interaction with the case, the ability to create a network of collection rules provides a means of providing robust processing logic, all of which can be maintained by the business owner. 
Delegating Rules to the Business User Please note that flows and UI rules are excellent candidates for business users to work on during the initial development phase; however, once the application is in production these rules become more challenging for the business to maintain.
This approach has the most risk but provides the most agility. This is useful when there is a small set of volatile rules that can be managed with minimal risk. We can minimize the risk by using the check-in/check-out feature which allows users to test the rule before having it used by others and ensures some kind of approval process is built. (We will learn about this more this shortly.). This approach is used in most cases, due to the quickness in which the changes can be seen. However, it must be remembered that this option requires planning and risk mitigation but provides the business with a lot of agility. 3. In a Separate Environment: A nice compromise is to setup a separate authoring environment. Here rules can be managed by the business and tested without the risk of affecting production in any way. Once tested the rules can be promoted into production on a separate cycle from the standard development environment. This approach though it looks ideal, it may not be practical because we need to setup a separate environment just for delegated users to work. This approach can be made easier by setting up a cloud instance, thereby removing the huge overhead in terms of cost. It is highly recommended that we use a Check-in approval process especially when the users are making changes directly in the production system. The production ruleset (which is typically used in this case) should use the check-out option so that multiple users cannot update rules at the same time. Check-out also helps us in implementing an approval process The production ruleset must also be set to Enable Approval Required field which is set in the Versions tab. After making these changes in the ruleset we need to: 1. Include Work-RuleCheckin as a workpool in the access group of the users. 2. Enable Allow rule checkout for each of those Operator IDs responsible for making changes. 3. 
Add the ruleset named CheckInCandidates to the Access group for the operators who are approving the check-ins. 4. Make sure the operators who are approving the rules get an access role with the privilege UpdatePrivateRuleSets. Pega 7 ships a standard check-in process already implemented for us. We can review it and make changes. We can also customize the flow in the application ruleset that is different from the production ruleset to use a different process. Chaining Expressions When discussing expressions, the key concept to understand is change tracking. Forward chaining indicates that the expression is computed when any of the source properties change. For example, Total = Quantity*Unit Price, the total gets calculated when Quantity or unit price changes. However, if you request Total before either of them has a value then the expression does not get calculated. In addition to the properties used in the expression we can also identify additional properties in the additional dependencies field. The order in this array is not significant and the expression is calculated when any of these properties change. Backward chaining as the name suggests computes the expression calculation based on the target property. This can be set in one of the 3 ways: 1. When the target property referenced does not have a value 2. When the target property is not present in the clipboard 3. Whenever the target property is referenced Obviously each of these has its own use but the system generates a warning when we use "Whenever used". This option, however can be used in some cases such as when the expression involves values from properties on multiple pages or when the property is not referenced in many places in the application. Declare expressions can be invoked procedurally by using a collection rule. When creating a collection rule, we can include a declare expression along with other rules. 
If the declare expression is included in a collection rule then the declare expressions rule should use the option invoked procedurally. Pega versions prior to 7.1.6 do not have this option. If the declare expression used in the collection rule is chained to other expressions then we should use the option "When applied by a rule collection". This option is there in the product to support this use case and also for backward compatibility. In addition, we call the utility function defined as part of the application which makes declare expressions extremely powerful. When using functions unexpected behavior can arise causing issues especially when using forward chaining. Forward chaining when used by expressions determines which properties the system should watch for changes. The list of properties to watch is determined when the expression is saved by reading the rule data and looking for property references. When the function dynamically refers to a property or the property is referenced indirectly as a string, as opposed to a property reference unexpected behavior can arise. In other words if your property reference has quotes around it there may be an issue. Let's look at some examples of how expressions can be written and the effect it has on change tracking. In some cases it may be difficult to pass all the property references and values in a way that works with forward chaining so we may need to use backward chaining. Goal Seek Pattern This pattern is used to ultimately calculate or determine the value for a single decision. For example, the price of a quote or the acceptance of a submission. This property is the "goal" in goal seek. The pattern uses backward chaining expressions to determine what values are missing to determine the goal. For each value that is not available the system can prompt the user for its value. This is the seek part of goal seek. Goal seek pattern is useful when we need to seek values for one of these dependent properties. 
For example, assume we are calculating an expression which uses discount; if discount does not have a value, the total price does not get calculated.
Keep goal seek in mind the next time you find yourself manually creating a screen that feels like a questionnaire or that is duplicating logic already in trees and tables. Goal seek is simple to use, so experiment with it and see if it fits your application's needs. SLA - needs review The ServiceLevelEvents agent is defined as a Standard agent and by default runs every 30 seconds. No attempt is made to process a System-Queue-ServiceLevel queue item until the current DateTime is greater than or equal to the value of the queue item's pyMinimumDateTimeForProcessing property. Passing that test, no attempt is made to invoke the ServiceLevelEvents agent's ProcessEvent activity unless a lock can be obtained against the queue item's associated case. If the case happens to be locked, the pxErrorList of the queue item is set similar to the example below; the pxLastExecutionDateTime will also be set to the time when the lock was attempted. <pxErrorList REPEATINGTYPE="PropertyList"> <rowdata REPEATINGINDEX="1">Cannot obtain a lock on instance PA-FW-GADMFW-WORK C-361, as Requestor H6C79350BBEDA482ACCD28F1C4AD5F1F1 already has the lock</rowdata> </pxErrorList> A System-Queue-ServiceLevel instance has a pxEvent property, the values for which are limited to "Goal", "Deadline", and "Late". Whenever an assignment is created, the Assign-.AddAssign activity is called. If a ServiceLevel rule is configured against the assignment, AddAssign creates a ServiceLevel queue item with the value of pxEvent set to "Goal". When the ProcessEvent activity is eventually called, a check is made to determine whether the current queue item should be permanently dequeued, in other words, the ServiceLevel has run its course. If not, a new queue item is constructed using values from the currently examined queue item. The value of the new queue item's pxEvent property is set using the ternary-expression statement shown below. .pxEvent = Primary.pxEvent == "Goal" ?
"Deadline" : "Late" At the end of the ProcessEvent activity, the ExecuteSLA activity is called. The ExecuteSLA activity is where the Assignment's ServiceLevel rule, if any, is opened and processed. An assignment stores the name of its associated ServiceLevel rule within its pxServiceLevelName property. An examination of the System-Queue-ServiceLevel class shows a number of Properties whose names begin with "pxGoal" and "pxDeadline" which is by design. The main purpose of the ExecuteSLA activity is to recompute the assignment's urgency as wells as to execute the list of escalation activities, if any, associated to the type of event that has transpired. Work SLAs The OverallSLA flow is a special flow that runs in parallel to any case that configures the ".pySLAName" property when created. The Case Designer supports this value being set within the "Case Details" tab. The value of "pySLAName" is the name of the ServiceLevel rule to be invoked by the single workbasket assignment within the OverallSLA flow. The name of this workbasket is expected to be "default@" + lowercase(org name). The value of the case's ".pySLAName" property is transferred to the OverallSLA Flow workbasket assignment's ".pxServiceLevelName" property. It is also possible to define ".pySLAGoal" and ".pySLADeadline" DateTime properties on each case. For these properties to take effect, the ServiceLevel rule represented by ".py6/AName" must be configured to use those properties as the Goal and Deadline times, respectively. There may be some confusion about two of the options used to configure an SLA's "Assignment Ready" value, namely "Dynamically defined on a Property" and "Timed delay", particularly the latter. Setting the SLA start time in the future does not prevent the assignment from being completed or, in the case of the overall SLA, prevent the case from being worked on. The effect of using these options can be observed when assignments are processed back-to-back. 
Even though the user may have permission to perform the second assignment, if the start time for SLA is in the future, a Confirm harness is displayed. That user, however, can still perform that assignment should they choose -- there is no penalty for completing work early. The "Assignment Ready" SLA value is set into Assign-.pyActionTime. The values for a case's pySLAGoalExecute and pySLADeadlineExecute DateTime properties are strictly set by overall SLA workbasket assignment ServiceLevels. These values are used by various case-wide "Quality" and "Processes" reports. "Quality" and "Processes" reports report against case tables. The difference between the two is that "Quality" reports report against resolved cases whereas "Processes" reports report against unresolved cases. As explained in Help, the System record defines the duration of a lock timeout, the default value being 30 minutes. If a case is idle for 30 minutes, someone or something else can open it and "steal" the lock. That "something else" can be the ServiceLevelEvents agent. Help does say "However, even after a lock is marked as soft, the lock holder retains the lock and can save and commit the updated instance." What this means is that the lock holder retains the lock provided someone or something else has not "stolen" the lock. Suppose the case is parked at an assignment with a ServiceLevel rule that defines an escalation such as "Advance Flow" or "Transfer"? The result of the escalation action could very well result in the case no longer being owned by who is now a former lock owner. Another concern with SLAs and locking is that the ServiceLevelEvents agent cannot act on a work object that is currently locked. Suppose a user forgets to close a case or logout of their application, instead the user has either left the browser in its current state or has closed it.
Suppose also that this scenario has taken place just prior to the Deadline time for that case where the Deadline has specified one or more escalation actions. The result would be a 30-minute delay before the ServiceLevelEvents agent can perform those escalation actions. Whether a 30-minute delay is significant depends on the business use case. Suppose the goal and deadline times are very short; a customer is waiting for a response. One approach to dealing with this situation is to define a shorter locking timeout for time-critical cases using the Case Designer "Detail" tab's Locking strategy screen. Note, however, that the above setting affects every assignment within the case. In extreme situations, it makes sense not to rely solely on a ServiceLevel rule being directly associated to a sub-hour, time-critical case Assignment. A possible alternative is to spin off a subcase prior to the timecritical Assignment. Either Case Propagation could be used to communicate the values of the parent case's Goal and Deadline Properties, or the child case could refer to those properties directly using "pyWorkCover". The child case's locking strategy would be defined such that it is not locked when the parent case is locked, for example the DetermineLockString activity is overridden and defined as ".pzInsKey". If the parent case completes prior to its deadline, it could implement a strategy similar to the UpdateCoveredTasks Declare Trigger that sets a ticket against open subcases. If, however, the subcase reaches the deadline first, it could send one or more notifications that the assignment is running late. Other escalation actions are possible but may run the risk of uncommitted, browser-entered data being lost. Controls Pega 7 comes with several auto-generated controls that can be used as-is without any modifications. Most of these controls come with parameters that help in using them in varied scenarios. 
There are broadly three different control modes as shown here in the control rule definition. 1. Editable/Read-Only - use to present values in both editable and read-only modes. 2. Read-Only - use to present values in read-only mode 3. Action - use when the user has to click for an action to occur Let's take a look at the Options field for the Button, which is the most commonly used control. The button has a label that appears as a tooltip when you hover over the button. We can also disable the button on a condition or use a privilege to show the button based on user role. Buttons also offer styling options - the format can use one of the various formats defined in the skin rule. By default, three style formats are supported for buttons and additional formats can be defined in the skin rule. The image source field in the button control can reference a binary file rule (if simple image is selected), a property which has the image path (property) and the Icon if using Icon Class. The options vary by the action control that is selected. So for example, a Signature shows only a tooltip and can be configured to disable the control based on a condition or can conditionally be shown or hidden based on a privilege. Let's now talk about Read-Only modes. There are two types of controls available in Read-Only mode; Text and Hidden. If Text is selected, it presents other formatting choices including the option to choose a style format defined in the skin rule. The type determines the format options that can be configured, for instance in the case of Date, the system provides the options to determine the format in which the value is presented. This is extremely powerful because we can use a single control to display dates in different formats by configuring the format in the field where the control is being used. If we are using a True/False property, we can customize the label of true and false as shown here. Or we can even use an image to indicate true or false. 
Text Input This is the most commonly used control in user screens to enable users to enter values in a text box. It offers various choices including selecting a style from the skin rule, specifying the size in parameters, as well as the minimum and maximum values that a user can enter. The format area helps in customizing how the value is presented in the screen. The choices in the left are editable and the choices in the right are in read-only mode. For illustration purposes, we have selected Number which display additional configuration options specific to numbers such as decimal places, rounding and so on. Date Date is another commonly used input control. It presents two different ways users can pick dates- either from a calendar or from a dropdown list for the year, month and date. There is a sub category in Editable controls which display a list of values for selection. These may take the form of a Radio button, Select box or AutoComplete. List based controls requires the selection of the source of the list and this is handled by a parameter defined in the control. Radio button choices can appear either vertically or horizontally just by configuring the Orientation parameter. Again the choices vary with the control type and in the case of dropdown we see additional choices such as what to display as the option, placeholder text and when to load the choices so that the screen can render more quickly. However, all controls provide similar formatting options wherein we can use the skin rule to format the presentation. All Auto generated controls come with standard formats and allows us to define custom styling formats as well. Defining a New Control There are quite a few auto generated controls that comes with the product that can be used as-is and customized by using the parameters and the skin rule. In some cases we might find that we are making the same set of changes to the parameters. How do we address those changes? We use custom auto- generated controls. 
However, we need to make sure we are creating a new auto-generated control only when it really makes sense. We do not need to create custom controls for a style change because we define the style in skin rule and reference that format in the control. Similarly we do not need to create custom controls if the change is not drastic. One example where we could create custom controls is if similar set of actions are carried out, say if we want a custom submit button which does a post value and refresh section. Earlier we learned the definition of the Text Input control. The Pega product development team has created additional controls which merely extend the Text Input. So the Text Input control is saved as the Number control which has the type selected by setting the type as Number. Similarly the Currency control is defined as a new control just by selecting the Currency in the Symbol field, which then displays additional configurable fields for configuring the currency. We can define a new auto-generated control by first selecting the control mode and then the UI element. After selecting the UI element the options and formats change based on the UI element that we have chosen. Specialty components provide capabilities for the developers to build and integrate the third party components such as jQuery, Flex, Flash, JavaScript in the UI rules of a Pega application. Specialty components can receive and send data back to the data elements used in our Pega Application. Pega publishes the API's which can be used to set and get values. Specialty components support JSON (Javascript Object Notation) type data exchange. Specialty components support both single level and embedded properties. If a page list is used as a parameter then all the associated properties are also available in the specialty component. 
The specialty component is saved in a section rule which makes it reusable, and also supports rule specialization and rule resolution and behave similarly to any other section rule. The specialty component is recommended since it offers the ability to leverage other UI rule capabilities. The section rule that is marked as a specialty component can be included in other auto-generated rules. Specialty components minimize the need to build custom UI on top of our application to support features Portals A portal rule belongs to the user interface category and unlike other rules it does not apply to any class. When defining a portal rule, we enter only the name and ruleset to which it is associated. Portals primarily use two different UI rules - Harness rules for the content and Skin rules for styling. Portals can reference the same skin used in the application rule. It is important that we do not change the fields in the Configuration despite them appearing as dropdowns. Like Portals, skin rules do not have an Applies To class but we need to pick the Applies To class name for the harness that is being used. In most cases it is recommended that we use Data-Portal as the class name since all the sections that we can see in Case Worker and Case Manager portals are defined in Data-Portal class and we need to define the harness in the same class so we can reuse the sections. Customizing portals can be done in one of the following three ways: 1. Modify the sections that are used in Case Manager or Case Worker portal. 2. Modify the harness used in Case Manager or Case Worker portal to include new sections or screen layouts. 3. Create a new portal rule and a new harness rule. The harness can reference both new and existing sections. The skin rule that is used in the portal rule must be customized to ensure that the portals are responsive when opened in a mobile device. 
Harness rules used in portals are a little different in their construction from other harnesses that are used in work forms such as the perform and confirm harnesses. Harnesses use screen layouts to group sections inside its layout. When defining a new harness, the first step is to pick a screen layout from the seven supported layouts. The screen layouts offer customization to the skin rule in terms of background color, width, alignment and so on. When constructing new portals, Live UI comes in handy to identify the sections that can be reused as-is. For instance, we can use pyRecents for displaying the recent items instead of recreating a new section that would do the same thing. Similarly, the header contains the sections displaying logo, a link for the operator menu, and a text box to search on items that can be added as-is. It is a very common requirement to embed reports on manager portals. We do not need any special sections for this. In the section, we need to add a chart control and then configure the control using its properties panel. The key things to change here are the chart type (which decides the type of chart we can display) and the Type field in Data Source (this can be a data page or a report definition). Menus are another type of control that is used mostly on sections referenced in portals. Menus offer the ability to display the list of items in a menu format providing the option for the user to navigate as a menu. The menu control references another UI rule type, navigation which is used in displaying a hierarchical relationship. Navigation rule unlike other UI rules present a different interface. We can add an item and configure actions which perform a specific action when clicked. This menu just displays one item below another and there is no hierarchical relationship. Dynamic Container is an auto generated way of presenting the worklist and cases in runtime. Dynamic Containers support presenting them in one of these three ways: 1.
Single Document 2. Multiple Documents 3. Dynamic layouts When creating a custom portal, the dynamic container is added in the section referenced in the center panel because that is used as a work area by the users. The dynamic container is part of the layout group in the designer canvas and after adding it we need to pick the rule that it can reference to. It can directly reference a section or a harness. The mode field is used to select Single or Multi Document. What is the difference between Single and Multi Document? In the case of Single documents, there is only one active document used (for example, a case). When another case is created it closes the existing document (removes the case from clipboard) and open the new case. Single Documents are useful in cases when users work on one case at a time or access the work list to open cases. Single Documents are pretty useful in keeping the application from overloading too much data and the performance is always better. To add a single document, we just add the dynamic container outside the existing layout in the section and then reference a section or harness. Multi Document mode allows opening multiple documents at the same time. It is added the same way as a single document. The system allows us to have a maximum of 16 simultaneous documents and by default it sets 8. Both these modes can reference section or harness for content. Typically we use sections when simpler layouts are created and harnesses are when we want to build the whole screen. The default case worker portal uses the work list section which displays the work list while the default case manager portal uses a harness which displays the whole dashboard. The third way of adding dynamic containers is placing it directly on a layout. This is normally used when we are using it in one of the side panel or header panel and we want to refresh a small part of it and not the whole panel. 
In some cases dynamic containers are added in a tabbed layout so that multiple documents are opened as new tabs. We can also add the dynamic container inside a dynamic or a column layout. When we add a dynamic container directly inside a layout it disables the default view and also renders the dynamic container only in iFrames. We can select a section in the Purpose field and we can also use Multi Document mode. Mobile How do we implement this in Pega? For Sidebars (left and right), responsiveness must be set so that the columns do not appear and get rolled into a navigation menu when accessed on a mobile device. When the response breakpoint is reached (in this case when the screen width shrinks to below 768 pixels) it gets rolled over to the header as an icon. We can customize this and change the width at which it rolls over to that setting, and the icon that is being used to access the pane. A float can cause an element to be pushed to the left or right, allowing other elements to wrap around it. If we want UI elements to, say, hug to the right, we would float right. That way regardless of the width of the screen, the element always stays hugged to the right. While designing the sections that are used in the header and footer, we need to make sure that we use floats so that the inner layouts automatically align to the left, center and right respectively. As an example, setting floats ensures things such as the logo image appearing in the leftmost corner and the logoff link appearing in the rightmost corner irrespective of the device on which it's being accessed. Configuring Responsiveness in Layouts Dynamic and Column Layouts: Responsiveness is configured in the skin rule by using a response breakpoint. In the layout configuration we pick the screen width at which the layout is formatted to a different layout. 
So for instance, a two column Inline becomes stacked at 480 pixels which means any device accessing that screen width renders the layout in a stacked format. When using column layouts, the column that renders primary fields appears at the top while the other columns roll down below it. This can be done in the skin rule as shown in the screenshot. Layout Groups: Layout groups allow us to group content in different ways by using different presentation options such as tabs, accordions, menu items or as items stacked one below the other. The great thing about Layout groups is that they can be set up to change from one presentation format to another based on the responsive breakpoints. Configuring Responsiveness in Grids Grids are one layout where we can configure responsiveness in the skin and also on the actual grid. We need to use the properties panel of the field to set the Importance flag as 'Primary', 'Secondary' and 'Other'. The skin rule offers the ability to remove the fields that are marked as Other on a specific breakpoint. When transformed as a list, the Primary field appears as header while the secondary appears below the header. Pega 7 supports accessing the mobile application in offline mode, which means the application can be rendered when the device is not connected to the internet. Offline mode is useful in presenting data and also to collect data. After data is collected, the device needs to be synchronized online to complete work processing. For applications to work offline, two things need to be done. 1. When the device is online, data required for offline access must be cached in memory for later access. 2. Similarly when the device is online, data collected when offline must be synchronized back to server. Offline access is enabled at the case level, so we need to use the Case type details screen to enable the offline mode for a specific case. Offline applications also require using optimistic locking. This can be done in the same screen.
Offline capabilities allow the following functionality: Users can Login or Logout from the application Create new cases that are offline enabled View cases and harnesses Update cases that are offline enabled View Worklist and process Assignments Some limited dynamic UI is supported Applications require extensive testing to ensure that the requirements are met. We highly recommend that applications get tested periodically. How do we test applications in mobile devices? There are quite a few ways we can do this. 1. Use the mobile device: The best choice by far is to test on the actual device because other modes can simulate the experience only to some extent. We are listing other choices since it is not always possible to test on all possible mobile devices. 2. Developer mode in Safari: After enabling the Develop menu in the Advanced preferences of the browser, use the User Agent to choose the specific mobile device. This works well if we are testing on Apple devices such as the iPad and iPhone. 3. Using the Android Emulator: We can simulate the android browser by downloading the Android SDK and then extracting it to access the android emulator. More information can be found in the Android developer documentation. 4. Using Chrome User Agent: Launch chrome with the desired user agent from the command prompt. In the command prompt enter something similar to the following: C:\Users\<username>\AppData\Local\Google\Chrome\Application\chrome.exe --user-agent="<useragent>" <username> is the windows user name and the <useragent> is the string that has the device, the build, the platform version in it. 5. Remote Tracer: Using tracer, you can trace all requestors logged in to that node. This helps in tracing a user session logged on a mobile device. 6. Using other Third party tools: Following are a number of free mobile site UI testing tools to help in testing mobile applications.
Mobile Phone Emulator: A popular mobile phone emulator, this tool allows you to test your site across a large number of mobile devices. MobiReady: a testing tool that evaluates how well optimized your site is for mobile devices, taking into account mobile best practices and industry standards. You'll get a score from 1-5 and a full site analysis. Screenfly: Screenfly lets you view your website on a variety of devices. Just pop in your URL. iPad Peek: As its name implies, this tool lets you see how your site appears on the iPad or iPhone. Localization The Pega 7 system by default uses the locales that are set in the environment settings of the client machine. These changes are configured during the installation of the OS and can be changed if required. On a Windows machine, these can be modified using the control panel. Making changes in the control panel impacts all programs running on the machine. The browser settings are another place where we can override the locale. Updating the browser affects all websites opened using that browser. For example, in the Internet Explorer browser, we can choose Internet Options and set language preferences. Overriding locale settings in Designer Studio We can also override these locale settings for a specific user session in the Designer Studio by selecting User Interface > Localization Tools > Locale Settings. The Locale Settings dialog provides various options. Let's take a look at each of them. Here is an example of the dialog when we first open it. We can click the Settings button to view the current setting. The values are referenced from the browser and machine settings. Let's use the Demo feature to see how the locale settings affect the presentation. In the Locale Settings dialog, we change the locale and time zone to Chinese and Asia/Shanghai, respectively and add the ISO currency abbreviation for Chinese yuan. We then click Update to save the locale for the current session.
We click Demo to see the effects of the update. Note that the locale and currency settings are in Chinese and Chinese yen, respectively. Note that the text describing day and year are in the Chinese language. The Currency values are in Chinese yen. We can select other locales in the Locale field and click Compute to see the effects in the Selected locale area. Note: Compute is run on the local workstation. If you are trying this on your local machine, make sure that your machine is capable of running a JAVA applet. Otherwise, you will not be able to see this work. When the user logs out and logs back in, the system reverts to the original default settings. In cases where we want users to be able to switch locales during their sessions without having to log off, we can use the engine API named setLocaleName to change locales programmatically. For more information, consult the Pega API document by selecting the APIs > Engine menu item in the Designer Studio header. Localization is the process of adapting internationalized software for a specific region or language by adding locale-specific components and translating text. Pega automates the process by supplying a Localization wizard we can use to translate the text strings in labels, captions, instructions, and so on, that appear in the Pega user interface. Pega also ships language packs for some languages, which translates many of the strings and reduces the effort needed to localize applications. As a best practice, use the wizard to get the strings from the application even if a language pack is used. The output file can be downloaded and we can overwrite the changes if required. The localization wizard can be started in either of two modes: Create a translation package. Upload the completed translated package to a language-specific ruleset. Creating a Translation Package Select Designer Studio > User Interface > Localization Tools > Translate to New Language. The wizard consists of four steps. 1. 
Select Languages —Select the application ruleset that we need to translate. The options list all the possible languages. We are not restricted only to the language packs shipped in the product. In our example, we select Spanish. When we complete this step, the system creates a work object using the prefix pxL. 2. Select Rulesets —Select an unlocked ruleset version in which to save the rules. We can also choose to include all PegaRULES application rulesets and export them for custom translation. If we wish to acquire a language pack that Pega 7 ships (such as Spanish), we can exit the wizard at this point and import the pack. We would then reopen the translation in the Translation in Progress mode. If we will not acquire a language pack or if the languages do not have language packs we can indicate that we want to include the Pega fields in our language package for custom translation as shown here: 3. Enable Records —Start the process of enabling and validating the selected records. When the process completes, we see rules in the Errors list that require manual updates. To fix a rule, we click the link in the Rule Name column to open it. 4. Export Translations —The system generates the translation package. If a Pega language pack is installed, the system creates translations for the strings using the pack. The system creates an XML file for all the text strings that require translation. We can download this as a zip file and then open it in an Excel spreadsheet to see the fields. The team can now work on updating the translations for all these strings in the spreadsheet. Uploading the Translation Package to Our Application When the translation process is complete, we are ready to upload the contents of the package to the application. We select Designer Studio > User Interface > Localization Tools > Translations in Progress. In the first panel, we select the wizard work item for the package we created. We then complete three steps. 1.
Import Rulesets — We select a new default ruleset created by the system to store the Spanish translated strings. So that the translations can be shared across applications we would select an organization ruleset. We add this ruleset to the access groups that require Spanish translations. The system automatically displays the Spanish text when the user interface is rendered. 2. Upload Translation Pack— We import the zip file that contains the translations. 3. View Import Results — The system imports the translations and creates the ruleset and the associated rules required to save the translated strings. Localizing the application is relatively easy if we develop the application using best practices. If the requirements indicate the localization must be supported, we must be aware of the following guidelines. To get the wizard to pick the rules, we must make sure that the localization flag is enabled in all harnesses and sections. Some rules such as correspondence, paragraph, and work parties contain text that must be manually created, translated and stored in the corresponding ruleset. We must define field value rules for all the labels and other text strings. In the following example, we would clear the Use property default checkbox and enter a field value. To understand how the localization wizard uses values in a field value rule, let's look at an example. In addition to the Apply To class, the field value is identified using the field name and the field value. The field name indicates how the value is used in the user interface. For example, pyCaption means that the field value is used as a label on a section. The localized text in the To area contains the actual text that is saved as part of the rule. When we translate the application to Spanish, the localized label stores the translated field value for Hire Date in the Spanish ruleset. Standard Field Values and Names Pega includes standard field values using field names as shown below. 
The Localization wizard picks all rules with these field names; the language packs automatically translate them by default. pyCaption — this is associated with all the labels that are used in the sections. pyInstructions — this is the instruction text that is entered in the assignment shape of the flow. pyStatusLabel — used for status values such as New, Resolved-Completed, and so on. If we create any custom statuses then we should define the field value rule for that work status as well. pyLabel — used as short descriptions for case types that appear in the user interface such as flow actions. pyMessageLabel — this is associated with the error messages that can be added in the activities or in the validation rules. pyButtonLabel — this is associated with labels that convey a command used in click-action controls such as buttons or links. Examples include Save, Cancel, Submit, Next, and OK. pyToolTip — this is associated with text used in a ToolTip. For example, "Case ID." pyActionPrompt — this is an explanation in sentence form that is presented as a ToolTip. For example, "Find a case by its ID." Accessibility Pega supports developing accessibility-friendly applications for the users who use assistive devices to access the application. Pega follows guidelines established by Section 508 of the United States Rehabilitation Act, the Web Accessibility Initiative and the Disability Discrimination Act. Pega includes a special ruleset named PegaWAI in the installation bundle. The ruleset renders the application in accessibility-friendly mode without any extra coding effort. We need to import the PegaWAI ruleset and add it to the application rule production ruleset list. We also add the ruleset to the access groups of users needing accessibility features and controls. On the access group rule Advanced tab we enter the ruleset in the production ruleset list and click the Enable accessibility add-on checkbox.
When users log in they see the screen formatted for accessibility, and the labels can be read by JAWS or other screen readers. Many controls and labels are formatted differently. For example, the date control displays as a dropdown box instead of as a calendar. Some images used in field labels such as the required icon are rendered as text descriptions. Viewing the Accessibility Report Pega includes a report to test the accessibility compatibility of the application by going to Designer Studio > Application > Tools > Accessibility Report. We can run the report only after adding the PegaWAI ruleset to our access group. The report groups all rules as harness and flow actions. Drilling down further we can see all other referenced rules and controls. The report indicates compatibility % for each element. Note the TabbedScreenFlow7 harness, which has a score of 0%. By default, accessibility is enabled for all auto-generated rules. In this example, we can click the element on the list to open the rule and reset the Accessibility checkbox to Yes. In general, we should test other rules and enable this flag. We can hover the mouse pointer over other elements to see why they are not accessible. In some cases, a priority appears indicating the nature of the accessibility violation and an alternative solution to the violation. Tips for Building Accessible Applications Pega makes the application accessibility-friendly and it does not require any special code. The compatibility report is useful in finding the recommendations for some violations. However, we need to design the application according to specific guidelines to make this work seamlessly. Here are some examples. We should use relative and not absolute units in the markup language. We should avoid using px or px-fixed layouts. It is a best practice to use dynamic layouts. We should not use standard markup or inline styles. Instead, we should use style sheets.
Events based on mouse actions should be avoided because this impacts most of the AJAX calls. Typically this involves mouse actions like onClick on a checkbox, Hover, automatic calculation of values, and so on. We should avoid icons and replace them with buttons and then add an ampersand "&" before the caption to create a shortcut. For a Save button we enter &SAVE as the caption and the shortcut key will be ALT-S. We can create shortcut keys for most common actions such as Submit, Save, and Cancel, and for the flow actions if there are multiple flow actions in the same assignment. IAC Typically, we access a Pega application on the Web using composite portals, which include the case manager, case worker, or Designer Studio. The Pega user interface displays in the entire browser window. The portal rules do not require integration with an external system. IAC, on the other hand, allows us to embed the Pega application as a mash up by way of a composite application embedded as a gadget in an existing Web application. A mash up is a term used for Web applications that merge data and content from multiple sources and present them in a common user interface. IAC is designed to work with all the standard Pega harnesses and sections. UI customization is not necessary. In other words, an existing Pega application can be embedded as a mash up without modifications. The example below shows a gadget in the marked area that presents an auto insurance quote screen flow within the company Web page. The process was built using standard Pega UI elements. IAC leverages all the different authentication and authorization methods supported in Pega. Standard IAC authentication activities in the PegaRULES ruleset can be customized to integrate with the third party authentication implemented in the external Web application. IAC is designed to work seamlessly in any Java or .NET Web application.
There are three main components in IAC: the Pega Gadget Manager, the Pega Instance, and the Pega Composite Gateway. Let's look at all of them now. Pega Gadget Manager — A JavaScript file, PegaInternetApplicationComposer.js, which contains the scripts to launch the Pega application in a Web application as a gadget. We need to perform two tasks when setting up the Gadget Manager: 1. Add configuration properties such as URL of the servlet, system ID of the Pega server, name of the application, and so on. 2. Create an HTML DIV element for each Pega Gadget; the gadget should indicate the Pega action, action attributes and the Gadget Manager attributes. We use the Gateway Configuration Console for both these tasks, which is described on the next page. Pega instance — The Pega server containing the application handles the requests coming from the IAC users. Pega Composite Gateway — A servlet that is used to proxy the communication between the IAC Gadget Manager and the Pega instance. This servlet (prgateway.war) is included in the Pega installation bundle. The system administrator must deploy prgateway in the Web server. The gateway servlet and the Web application containing the Gadget Manager must be co-located in the same domain. The Pega application is deployed on another server. If the Pega instance exists in a different domain then we can only access it using a gateway servlet. After we've deployed the Pega Composite Gateway servlet, we can launch the Gateway Configuration Console, which is used to configure connection parameters enabling IAC gadgets to communicate directly with Pega applications. The Console is included as part of the standard Pega-IAC ruleset, which we must add to our application.
Before using the Console, edit the prconfig.xml on the Pega instance to include this entry: <env name="Authentication/RedirectGuests" value="false" /> Configuring the host To begin using the Configuration Console, we first specify connection settings for the system that hosts the application we want to access on the Host Configuration page. The Console generates property settings and gadget attributes that are specific to a host. We click Add to configure a new host. In this example, we use localhost because the Gateway is installed in the same instance where Pega exists (this is typically not the case in an actual implementation). Reporting When creating summary type reports we need to choose an aggregate column. This aggregate column can be a function applied on a column such as count, average, max, min on a specific property (Column). Summary reports help in providing a less detailed view by grouping related items. Assume we want a report of all the active cases in the system. A list-based report would have several hundred rows and it is hard to interpret anything from a list that long. A Summary report might be a better choice. Summary reports provide additional clarity, because on the same report we can group by the operator name column so that the results are grouped by operator and it also shows the count of active cases grouped by each operator. In a summary report, we select the column on which we want to summarize. For these two columns to appear as headers, we use the first field in the grouping category in the Report Viewer tab. When we review the report output, we can see that it displays a maximum of three results per group; this can be done by setting the rank in the Query tab of the report editor as shown below. We chose pxCreateDateTime to pick the three most recently created cases based on create date. Here we display the Top Ranked for each group to display three recent cases for each group.
A pivot table is a program tool that allows us to reorganize and summarize selected columns and rows of data in a spreadsheet or database table to obtain a desired report. Report definitions can be used for creating pivot tables. When reports are grouped using two properties, pivoting them groups these properties in rows and columns, thereby creating a pivot table. Let's see how we can create them in action. This report is nice but harder to read; putting this in a pivot table improves readability. Open the Report in the report editor and then right-click on the column and select Display Values across columns. The report now uses the create date property across the row and the work status across columns. While creating a pivot table, we need to decide which property to display across columns. We need to select the property that has a finite number of values as we wouldn't want the report to grow in such a way as to require horizontal scrolling. This report is a good design because the work statuses will only be a handful of values, whereas the create date time continues to grow over time. Sub reports join two different classes using specific join conditions and the results from the second report can be used to filter the first report or can display as part of the first report. Sub reports can be used to calculate aggregate values, such as counts, maximums, and minimums. Let's make a distinction as to when we would use a sub report versus a summary type report definition so it is clear when to use each if aggregate calculations are required. This illustrates a best practice. If it is necessary to have an aggregate calculation for a flat list of distinct records, including multiple properties for each record, then we should use a list report with a sub report. However, if the aggregate calculation is needed for a group, then we should use a Summary Report. Let's look at other examples where we need sub reports. Sub reports and Class joins are closely related.
When should we use a simple join to associate two classes versus a sub report? Joins are useful when we associate two different classes and present columns which belong to the class in the second class. For example, we use a predefined association between Assign-Worklist and the work class to display columns belonging to both these classes. Assume we want columns that we joined from other tables to display aggregate calculations; for example, we want to display a report that shows a list of all purchase requests, and includes columns for the time when the request is created, the operator who created the request and case ID, and all these columns come from the purchase request work class. Now we need to include the aggregate calculations on subcases of the purchase request: the date the first subcase was resolved, the date the last subcase was resolved, and the subcase count. This requires us to use a sub report. Sub reports are added into the main report using the Data Access tab by including the sub report name, its Apply To class, and a prefix. Then use the configuration dialog to set: 1. Where the sub report result is used. 2. Filter conditions to join the report. 3. Join conditions to configure an inner, outer, left, or right join. To use the columns from our sub report we need to enter the prefix we specified in the Data Access tab. Another use case for sub reports is to use the results of a report as a filter for the calling report. Joins can be used in some of these cases but sub reports are useful when we want to exclude the choices based on the results of a sub report. For example, we want to know the list of all operators who have no assignments past goal. A trend report presents counts of cases or other data elements at a series of points along a continuum, normally a time line. The X-axis of a trend report displays data points on that continuum representing weeks, months, or quarters in a year, or some other meaningful increment.
One column of the data supporting the trend report displays one or more Single Value properties of a DateTime type. The time-unit data requires us to use an SQL function (refer to the Atlas — Standard SQL function rules) to get the complete list. When using a line chart, the time-unit data must be in the second column to present the correct data. Let's look at this with an example; here we are trending the report based on date and we used the pxDate function. When we take a closer look, there are no cases created on 11/18, 11/19, 11/20 but if we look at the report someone might conclude there was one case created in 11/18 as well. Let's switch the order in the report definition to make the Date column as the second column and the count column as the first column. Now the report looks like the screenshot below. This is applicable only for line charts. If we are using bar or column or spark, then the report generates similar output in both cases. A summary type report definition can be configured to display the results in the form of a chart. The Chart editor allows us to add a chart if the report definition includes at least one aggregation column. All non-aggregate columns are referenced as group-by columns. The Chart tab indicates where to add the aggregate and the non-aggregate columns. After adding them it is necessary to use the Options icon to configure the data displayed. Combo-charts require two aggregate columns. Charts can be previewed only in the report editor which makes it logical to edit them in the report editor instead of directly in the report definition. There is an exception to this rule. If we are using map reports then we cannot edit them in the report editor. Bubble chart is a special category that can be used to provide a 3-D view of three numeric columns. The third dimension is represented in the size of the bubble for each data point, while the other columns are represented on the horizontal and vertical axes respectively.
Gauge reports can be very effective in displaying burn rate, throughput and other measurements of current state. There are 10 different ways of displaying these charts (including five different Angular formats). Pie charts are extremely useful when presenting reports that show the percentages of various categories, such as how many new purchase requests are placed by various departments (engineering, marketing, HR) and so on. Selecting between Bar and Column reports depends on whether we want the results to be presented in horizontal or vertical bars. Funnel and pyramid are useful to display the numbers across various stages, such as the status of a claim case, how many are in Open, Pending-UnderwriterDecision, Pending-WaitingforDocuments, Pending-ManagerApproval, Resolved-Rejected, Resolved-Approved and so on. Maps require us to add the map using the settings landing page. There is a wider variety of maps available which can be selected by searching using the autocomplete in the map type field here. Reports created by managers in a production environment should be associated with a production ruleset. These reports are not considered part of the application itself, but are in the production layer that sits on top of the application layer. To configure a reporting ruleset, we specify it in two locations: On the application rule Definition tab. On the access group rule Advanced tab. Note: Make sure that the ruleset has one unlocked version, does not use check-out, and has the necessary application rulesets as prerequisites. We can use settings in a report definition to customize the report presentation and behavior, and control which users can or cannot run a particular report. The Default Starter Report When a manager adds a report in the Case Manager portal and then selects the case type and report type, the system copies a standard report (either pyDefaultReport or pyDefaultSummaryReport) to the specified case type.
Managers can then use this report as the starting point for a new report by creating a new name key. We can copy the standard reports into the appropriate work class and application ruleset to customize the settings for reports created in production. Report Presentation and Behavior The report definition provides a wide variety of settings for customizing our reports. Here are a few examples: We can change the default thresholds on the Data Access tab. For example, we can increase the maximum number of rows from 500 to 1000. We can also choose whether to use non-optimized properties. On the Report Viewer tab we can customize user interactions such as what buttons to display. We can also choose to present records in multiple pages and define how many rows to include in each page. Report Privileges We can associate privileges with a report to control who can run it. We create a privilege rule and update the Access of Role to Object rule to convey the privilege to a select group of users. We specify the privilege on the Data Access tab. By default, there are no entries in the privilege list — any user who has the corresponding ruleset in the ruleset stack can run the report. If we add privileges, operators must be granted at least one of the privileges in order to run the report. Updating Report Resource Limits at the Application Level We can update default resource settings at the application level. These are the same settings we saw on a report definition Data Access tab, which apply to that report only. To see the current limits, go to Designer Studio > Reporting > Settings > Settings. We may want to change these values. For example, we may want to set a higher limit on the number of rows to retrieve if the query is likely to produce many results and we want to include all of them. We use the following system setting rules to change the limits in our application. Remember to log off and log back on for the settings to take effect.
Running Reports Maximum number of rows to retrieve — pyMaxRecords Maximum elapse time for database query — pyTimeoutQueryInterval Exporting Data from Reports Maximum rows to retrieve — pyExportMaxRecords Maximum elapse time for database query — pyExportQueryTimeout A report may work well in development but when migrated to a production environment it might not return the results we expect. Or it might cause an error condition. These problems are typically the result of incorrect mappings from a report to the table in the database. A report is applied to a class, and the class is part of a class group, for which there is a corresponding Pega database table instance, which has a pointer to the actual database table. Migration problems often originate from this database table instance. As shown in the example above, the table specified (pc_SAE_HRServices_Work) might never have been created in the production environment, or the database table instance itself might not exist in production. Remember that database tables are data instances that have an associated ruleset setting that is used to facilitate export and import. However, this setting is not required and so it is possible that it was missing or incorrect. As a result, these records might be left behind when installing a new system. The system gathers statistics for every run of a report in an instance of the class Log-ReportStatistics, which represents one or more executions of a particular report in a given hour. Each log entry is populated once per hour. Report statistics are enabled by default. We can disable them using the dynamic system setting for reporting/enable statistics and changing the setting from "true" to "false." There are four standard reports that provide visibility into this statistical data. 
We use these reports to see what reports are being used: pyDefaultReport — gives us a detailed view of report usage data. pyDefaultSummaryReport — gives us an aggregated view of the data. pyReportErrorSummary — shows us what reports have produced errors. pyReportUsageByUser — shows us who has run what reports. Let's look at the pyDefaultReport shown below. In addition to the Applies To class, report name, and user for each report, there is a Run DateTime column, which contains timestamps indicating when the system created a log entry. (Note that the timestamp does not represent when the report was run as each instance can represent multiple runs). The timestamp could be up to an hour after the reports were run. Also included are key metrics for the report executions such as run, error, and row counts, and the total elapsed time. We can copy the standard reports and make changes to them to suit our custom requirements. If our reports are performing sub-optimally, we can examine factors in the report definitions by asking these questions: Are all of the properties in the report optimized? If there is a property value that must be extracted from a BLOB column rather than from an exposed column, this can lead to added query time. For guidance on whether to optimize specific properties, see When to use — and when not to use — unoptimized properties in reports. To help us locate unoptimized properties, check the Display unoptimized properties in data explorer checkbox on the Data Access tab. Unoptimized properties appear as selection options in the Report Editor's Data Explorer, in the Calculation Builder, and when defining report filters. Use the Property Optimization tool to expose top-level single value properties as database columns. For more information, see How to expose a property as a database column with the Property Optimization tool. Are there any outer joins (class joins, association rules, or sub-reports) in the query?
Selecting "Include all rows" on the Data Access tab can be costly. It causes the system to use an outer join for the report in which all instances of one of the classes are included in the report even if they have no matching instances in the other class: Select "Only include matching rows" if possible. Are many rows retrieved and has paging been turned off? One of the main purposes of the paging feature is to prevent excessive amounts of data being pulled onto the server. Turn paging on by selecting Enable paging on the Report Viewer tab. For more information, see When and how to configure paging in reports. Sometimes, the best way to troubleshoot a Pega report is to analyze the query outside the Pega system. Use the clipboard viewer to locate the pyReportContentPage page, and get the value of the pxSQLStatementPost property. Analysis of the results could indicate, for example, that database statistics must be gathered, or that additional indices are necessary. Let's consider the two most fundamental choices we make when creating a report: Case type — the class of instances to be reported. Report type — list or summary. There is a close logical connection between these settings. In the following example, work status data is used in two distinct classes and report types. For instance, assume we want to see a list of all cases in the application, their work status, and the stage they are in. Because we want to see application-wide data, we would create a simple list report in the work pool rather than in a case type. On the other hand, consider creating a summary report for a specific case type. For example, we want to create a purchase request report showing the total purchase request amounts by cost center and by work status. Because we are interested only in purchase requests, we create a report definition in the purchase request case type. We need to create a column containing the purchase request amounts, which will be aggregated.
We enter the declare expression .TotalPRCost, which gives us the total purchase order costs for all the line items in a purchase request. Cost center and total PR cost data are in the purchase request class so there is no need to configure an association between classes. We summarize the values in the .Total PR Cost column as shown here: Optimizing properties Most case types that serve as a source for a Pega 7 report have data stored in an extendable BLOB column. The BLOB can have both scalar properties and arrays like page lists and page groups. While this data can be read directly from the BLOB for a report, the property optimization wizard lets us expose scalar properties as dedicated database columns. Select a property in the Application Explorer and use the right-click menu to open it. The Property Optimization wizard can also be used to expose properties in page lists and page groups; this creates declare index rules. There are some properties that might not be reported on directly, but instead used as a source for a calculation. Such calculations are done using a SQL function alias rule. See How to create a Declare Index rule for an embedded property with the Property Optimization tool. Including data in related classes or reports The Data Access tab on the report definition rule provides association rule, class join, and sub-report features that allow us to include data in related classes or reports. Association rules let us define a relationship between two classes based on matching values in pairs of properties. We can then automatically add a join to a report that displays properties from both classes referenced in the association. For more information, see When and how to create an association rule to support reporting. Class joins enable reporting on data from multiple classes or tables. For each class join, we define one or more logical conditions that specify how to match or join corresponding instances in different classes.
Sub-reports allow results from any report definition (including a report definition defined on a different Apply To class) to be referenced and included in a different report definition. For more information, see When and how to use sub-reports in Report Definition reports. We use a centralized data warehouse if there is a requirement to report on data from Pega 7 in concert with data in other systems. BIX is a Pega 7 feature set that extracts Pega 7 data to an external data warehouse. This capability provides a consolidated repository for all enterprise data. It also allows us to use 3rd party reporting tools that are designed specifically to work with enterprise-wide data. BIX itself is a set of rules and rule types installed by including the BIX ruleset in the application stack. We set up BIX by configuring extract rules, a rule type included as part of the BIX installation. BIX can extract Pega 7 data to XML or CSV (comma separated value) files. It can also extract data directly to tables in a database schema. Like work, rules are instances of internal classes. This fundamental technical concept makes rule reporting possible. For example, cases for managing applications for open positions at SAE Candidate Corporation are instances of the class SAE-HRService-Work-Candidate. Property rules such as .TotalAmount are instances of the class Rule-Obj-Property. Do not confuse the class of a rule, which dictates the rule type, with the Apply To class of the rule. For example, all property rules are instances of the same rule class. Most rules also have distinct Apply To classes, which indicate the implementation location and are keys that are used in rule resolution. Remember that reports are created in the class of the items being reported. If a report is in the Candidate class, the report returns candidate cases. Likewise, if the report is in Rule-Obj-Property as shown here we get a list of properties when we generate the report.
Using the Data-Rule-Summary class as the basis for rule reports When we create rule reports, we usually want to span rule types and join the rule classes. To achieve this, we use the Data-Rule-Summary class, which contains an instance for each rule, and is described later in this lesson. Standard rule reports are packaged with Pega 7. Many of them are used on the Designer Studio landing pages. Let's look at some examples on the Application Development landing page by selecting Designer Studio > Application > Development. The Recent Actions shows what updates have been made most recently. Because this shows rule history events, the underlying report for this landing page is in the History-Rule class. The Checked Out Rules report, which is in the Data-Rule-Locking class, stores the rule locks that are created when a rule is checked out. The report includes the pzInsKey of the locked rule and pxInstanceLockedBy, which points to the operator who has locked the rule. The Weekly Rule Updates report shows how many rules, by ruleset, have been updated in the last 8 weeks. This report only looks at the most recent update timestamp. As such, if a rule has been updated more than once, only the most recent update is counted. The Data-Rule-Summary class is queried. Historical data is not being mined here — History-Rule is not part of this query. The Developer Activity report, on the other hand, does look at History-Rule class historical data. These are check-in counts, not just rule counts. Let's view the standard rule warnings report below by selecting Designer Studio > Application > Guardrails > Warning Details. We can use the expand row arrow to display the specifics about each warning.
The content in this landing page grid is supplied by the pxRuleWarningsInApp report definition in the Data-Rule-Summary class. The report contains properties such as pxWarningSeverity and pxWarningType, which are in the IndexWarning class — an index of warnings embedded in all the rules in the system. The properties are available because of a class join shown here: We click the Edit conditions button and see that the pxReferencingRuleInsKey property in the IndexWarning class points to the pzInsKey of the corresponding rule. Custom fields applied to a rule provide a flexible way to supplement our rules with metadata, which can be used as a source for a report. To find rules using these custom fields, we use the Find by Custom Field search tool available from Designer Studio > Process & Rules > Tools > Find Rules By Custom Field. Note: Custom fields are not available for Data- objects. Creating a custom field In this example, the process begins by creating a purchase request, followed by a confirmation of the request before it is reviewed by the approver. Think of Create and Confirm as two sub-steps in the request creation step of the process. We want to represent this concept by using a custom field on rules related to the step. We can then generate reports that show all rules for request creation. We want to add a custom field to the Review flow action because it is related to request creation. We open the flow action rule and add a custom field in the Custom Fields area on the History tab. We define the custom field property ProcessStep (no spaces) and value Request Creation as shown here: When we submit the update, the system creates the ProcessStep property in the Index-CustomFields class.
The new field appears in the Custom Fields area as shown here: Adding custom field properties to reports In the Data-Rule-Summary class we create a report definition that provides generic rule information such as class, rule type, rule name, and description and includes our new ProcessStep custom property. Before populating the fields on the Query tab, we go to the Data Access tab and join this Summary class to the Index-CustomFields class using a prefix of CustomFields. The join filter condition is .pzInsKey is equal to CustomFields.pxReferencingRuleInsKey. Returning to the Query tab, we add the report properties including our ProcessStep custom property in a column and as a report filter condition. Optimizing the custom field properties Note that there are two warnings indicating that the ProcessStep property has not been optimized and may cause poor performance. Although the system creates properties when we define the custom fields, they are not exposed as columns. We usually optimize a property by selecting it in the Application Explorer, right-clicking, and selecting Optimize for reporting. However, this is not allowed for classes in Pega rulesets, such as the Index-CustomFields class. For properties in Pega rulesets, we use the Modify Schema wizard to create the database columns and expose the properties. Go to Designer Studio > System > Database > Modify Schema. Select pr_index_customfields on the Select a Table window, and click the property count link in the View Table window to view the table properties. Select the property of interest to be exposed and click create selected columns. When we resave the report, the warnings are eliminated. Note: Alternatively, a database administrator can create database columns outside Pega 7. JMS Pega supports integration with message-oriented middleware, or M.O.M., based on the JMS (Java Message Service) and WebSphere MQ standards for both connectors and services. JMS is a part of the Java EE platform. 
WebSphere MQ is an IBM-developed public standard. Let's begin by looking at how JMS works and then how MQ relates to JMS and when to use each one. The sender of a message is called a producer. Producers create messages and send them to a destination on the M.O.M. The recipient application is called the message consumer. Consumers retrieve messages from the destination. Multiple producers and consumers can communicate using the same destination. How this is handled depends on the message delivery model we use. There are two basic message delivery models: Point-to-point, and publish-and-subscribe. We'll take a look at point-to-point first. Point-to-Point Model In point-to-point messaging, the producer is also referred to as a sender, the destination is called a queue, and the consumer is considered a receiver. As we said, multiple producers can send messages to a queue, and multiple consumers can retrieve messages from a queue, but the main distinguishing characteristic of point-to-point messaging is that a message can only be retrieved once. Once a retriever receives the message, it is removed from the queue. Messages are always delivered in the order they are sent. The strategy to determine which retriever gets which messages depends on the receiving application, which can use the message headers and properties to determine how to handle the messages. Publish-and-Subscribe Model The publish-and-subscribe model is different and we use it when we want to allow messages to be delivered to any interested receiver. This enables us to deliver the same message multiple times to different receivers. In the publish-and-subscribe model, the producer is also referred to as a publisher, the destination is called a topic, and the consumer is considered a subscriber. Consumers subscribe to the topic, and receive all messages published to that topic. 
The Anatomy of a JMS Message Let's take a look at the anatomy of a JMS message: Header - Contains general information about who the message is to, when it was sent and so on. These values are defined by the JMS specification, and are usually set by the JMS server. Body - This is the data that the sender wants to send to the recipient, and is sometimes called the payload. Both sender and receiver need to know what kind of message it is, and how to decipher it. Properties - These are key-value pairs that the recipient can use to help filter or process the messages. These properties are defined by the application. As with the message body, both sender and receiver need to know what properties should be set. JMS allows applications to exchange information without each having to understand the structure of the other. But the two applications do need to have a common set of expectations. Before we can send messages to a consumer, we need to agree on: The delivery model - point-to-point using a queue, or publish-and-subscribe using a topic. The JNDI name of the destination queue or topic. The message type - text, map, byte string, stream, or a serialized Java object. The expected message content - header, body, and properties. Response - whether a response is returned and, if so, its type and format. This agreement is essentially a contract between the consumer and producer, and must be established by the system architects before we can use message-based integration. JMS is a standardized API that is up to providers to implement. J2EE application servers like JBoss, WebSphere and WebLogic provide JMS implementations, as do numerous other third party providers. Message-Based Integration Customers using the IBM WebSphere application server have two choices for message-based integration: JMS and MQ. What's the difference? WebSphere MQ is a proprietary Message-oriented Middleware service from IBM. 
JMS on the other hand is standard, not a specific implementation where each JMS provider can implement JMS their own way, as long as their implementation adheres to the JMS specification. IBM's JMS implementation is built on MQ. Therefore, in WebSphere, JMS is actually a wrapper for MQ. In Pega we can use MQ directly, or the JMS implementation wrapper. For applications that are deployed as J2EE Enterprise Applications, there are several advantages to using JMS rather than MQ directly: JMS offers support for transactions using the Java Transaction API (JTA) MQ requires a Java thread to constantly "poll" for incoming messages; JMS has better performance because it can take advantage of J2EE queue monitoring Applications that use a non-proprietary standard like JMS are inherently more portable. So when might we want to use MQ? We usually want to use MQ when we need to integrate with a nonJ2EE application that uses MQ. Configuring an application to send a JMS message involves creating four types of components: A JNDI service instance to describe how to reach the JNDI server A producer model instance which describes the producer A Connect JMS rule, which actually handles sending the message A connector activity to call the JMS Connector And depending on how we choose to handle the message content, we may need mappers to create the message content, and parsers to handle the reply. We will examine each of these components using an example case in which we are integrating with a payment provider. After completing a purchase order, we send the ID of the vendor and the amount to pay, and our payment provider takes care of the rest. First, we need to ensure that we have access to the JMS services. If our application is deployed on a Java Enterprise application server like JBoss, WebSphere or WebLogic, then by default we have access to the JMS services provided by the application server. 
If we are deploying on a web application server like Tomcat that doesn't support enterprise applications, we'll need to configure a third party JMS provider. JNDI Service instance Then we need to create or identify the messaging destinations we will use to communicate with other applications. The destinations are defined using JNDI. JNDI stands for Java Naming and Directory Interface and is a standard Java mechanism to allow Java-based applications to access objects by name. In order to access the JMS destinations we need to create a JNDI Server data instance, which is available under the Integration-Resources category in the Records Explorer. The JNDI server is configured at the application server so all we need to do here is enter the values supplied by the application server administrator. We can also view the objects that are named by this JNDI server by clicking Browse Tree. JNDI does not only handle JMS destinations; it manages all named objects in a Java application. Before deploying a connector or service we need to make sure it is working correctly. There are a number of approaches to testing and debugging JMS Connector and Services. A useful tool provided is the JMS Explorer. JMS Explorer supports only queues, not topics, and only text-based message data can be displayed. The JMS Producer Model, also available under the Integration-Resources category in the Records Explorer, holds messaging settings for the JMS connector rules. Persistence determines whether the message is stored in the database or just in memory, which in turn affects the application's reliability. If the message is stored in memory and the system goes down before delivery, the message is lost. On the other hand, storing the message in the database adds overhead. Priority is only relevant if we are sending to a destination that uses priority. How this is handled is application-specific. Expiration indicates how many milliseconds until the message expires. 
The default is 0 which means it never expires. It stays on the queue indefinitely until retrieved by a consumer. We could set a higher value if we wanted to make sure the message would only live for a limited amount of time. Domain lets us indicate whether we are using a point-to-point model or a publish-and-subscribe model. Connect JMS rules are found in the Integration-Connectors category in the Records Explorer. The Service tab contains properties that reflect the agreement between our application, which is the message producer, and the service application we are connecting with, the consumer. The service-related properties describe how our connector communicates with the "service", which in JMS means the consumer. Resource Name Resolution specifies how the queue or topic is found. Use Direct JNDI Lookup to select a JNDI Server. Select Resource Reference to use Global Resource Settings to specify the name of the JNDI Server. The Connection Factory, and the username and password are values that have been provided to us since they are configured at the application server level. Destination name is the full name of the queue or topic to which we send the message. As with the JNDI server, this is provided by the system administrator. Responses are not supported for the publish-and-subscribe model. For the point-to-point model, if a response is expected, the sender includes the name of the response destination queue in the JMSReplyTo property of the message. Leave destination and the Response tab empty if a response is not expected. If a response is expected, the producer application stops and waits, and the connection stays open until the message is delivered and a response is received from the consumer. In this case the response queue is usually a dynamic destination. Meaning the JMS server creates a temporary queue that exists just while the connection is open. Once the response has been received, the connection is closed and the dynamic queue goes away. 
Alternatively, we can specify a static destination name. The error handling section includes properties related to handling of errors that occur during message processing. The status value and status message fields are string properties to hold the results of the attempted message. The standard pyStatusValue and pyStatusMessage properties in the baseclass can be used. In the case of an error, control is passed to the error handler flow specified. In our case, we are using the standard ConnectionProblem flow. Once you've got your JMS Rule set up, use the Test Connectivity button to make sure the settings are correct. The Request tab specifies what data will be in the messages that are sent by this producer. Remember that a JMS message consists of three parts: header, properties and data. Most of the header values are filled in automatically by the connector when the message is constructed, like the destination, or by the JMS provider, like the message ID. There are two header properties we can set: JMSType - used to distinguish between different types of messages, for example, notifications versus requests. JMSCorrelationID - used in asynchronous messaging to associate a response with the original Message properties are name-value pairs that are defined by the applications that are communicating. These are often used by a selector when multiple consumers receive messages from the same queue to determine which messages they should accept, or how they should be handled. Message data is the content or payload of the message itself. The Data Type to use here depends on what data type we set on the Service tab. In our case, we specified Text, so we need to pass a single String. In this example, we use an XML stream rule to create an XML string from a purchase order case on the clipboard in the format expected by our payment provider. The Response tab is applicable only to services from which we expect a synchronous response. 
On the Service tab, we must tell the connector what sort of data to expect in the response, whether from the dynamic destination we selected or from a static destination name that we provided. The data settings are similar to those on the Request tab, except instead of describing the data we are sending, we are telling the connector how to handle the data it receives. The consumer and producer must agree on the format of the response. In our case, we are expecting a single value in the response, with the key "status", which we map to a clipboard property .pyStatusValue. String responses can be copied as-is to the clipboard, or can be parsed using a parse rule or function. Connector Activity We need an activity to trigger the connector to send the message. The key step is a call to the ConnectJMS method, passing the name of the Connect-JMS rule that sends the message. In our example, we pass the RequestPayment JMS connector rule. When this step is reached, the connector rule creates a message object as we configured it to, establishes a connection with the JMS server, sends the message to the queue or publishes it to the topic we've told it to, and if a response is expected, waits for the response. Remember that a JMS message interaction includes an application that sends the message - a producer; a named destination for the message; and a consumer that gets the message and processes it. Now, let's learn how to configure and use a JMS Service to consume messages from a queue or topic. A JMS consumer is implemented using a JMS listener. The listener is configured to wait on a particular queue or topic for a JMS message (request) to arrive. When a request message arrives, the listener dispatches it to the correct service. It's possible for the listener to dynamically choose which service to use based on message properties, but in this lesson we are only going to cover the case where a listener is configured with a single service. 
The JMS service maps the data and properties of the incoming message to a clipboard page, and then invokes an activity to process the request. The activity does whatever processing is needed to handle the message, such as creating or modifying a case, storing or retrieving data from a database, or executing a flow action. Optionally, the activity can set return data and the service creates a JMS response based on that data and passes it to the listener, which sends it to a configured response queue. A JMS consumer includes the following components: A JMS Service Package A Service JMS rule A JMS or JMS MDB listener Any necessary data mapping components for mapping message data to and from the clipboard An activity to do whatever processing is required for incoming messages And a JNDI server data instance to locate the destinations by name We can use the Service wizard (DesignerStudio > Integration > Services > Service Wizard) to create a service. Let's have a look at the JMS consumer records. There are two types of listeners for JMS depending on how our application is deployed. If the application is deployed as an enterprise application or EAR, Pega takes advantage of a J2EE feature implemented by the application server called Message Driven Beans or MDBs. MDBs are configured at the application server level to receive messages from a queue and pass those messages to the appropriate enterprise java bean, in this case Pega. Pega can only be deployed as an enterprise application on J2EE compliant servers like JBoss, WebSphere or WebLogic. It cannot be deployed on Tomcat. If Pega is deployed as an EAR, create a JMS MDB Listener rule. If our application is deployed as a web application or WAR, the application server doesn't provide MDB capability, so instead we need to create a JMS Listener, which runs within Pega. Pega can be deployed as a web application on any supported application server, including Tomcat. 
However, remember that Tomcat doesn't provide JMS services itself. We need a third party JMS provider to use JMS on Tomcat. Service JMS Rule Most of the Service JMS rule settings are the same as for other service rules so we won't get into those here but rather point out the JMS specific ones. The Request tab describes how we want to map the data in the incoming JMS message. The Message Header section describes how to map the standard JMS properties that are set by the JMS provider. The Message Properties section is very similar except that the properties are application specific. We can add a row for any properties we are expecting the message producer to set that we care about, specifying the data type, the property name and description, and how and where to map the value of the property. The Response tab does what the Request tab does but in reverse. It describes how to map data to send a response to the message producer. In our example we are using a publish-and-subscribe model, so we don't send a response. JMS Listener for WAR Deployment Remember that the listener is responsible for receiving messages from a topic or queue and passing them to the service rule. It might also respond to messages if that's part of the integration. A JMS listener runs in its own Java thread, started when our application starts. The listener works by attempting to retrieve a message from the queue. If there's no message waiting, the thread stops and blocks until one is available. When a message arrives, the thread retrieves it and passes it to the service, and then goes back to waiting. Periodically it wakes up to see if it's received a signal telling it to stop. If not, it returns to waiting for a message. Having a long-running thread like this is not supported by J2EE, which is why this model only works in WAR deployments. The JMS listener can be found in the Integration-Resources category in the Records explorer. 
Depending on what type of service this is and how frequently we expect it to be used, this might have significant impact on our application's performance, so we will need to work with our system administrator to decide how to configure startup and the number of threads. Usually a JMS Service rule is specified in the Service Method field. It is possible to leave service class and method empty and have the message producer pass them as message properties, but this feature is rarely used and is not covered in this lesson. The wait interval is how often the listener checks to see if it's received a shut-down notice. Send error messages determines what to do if an error occurs for which the JMS Service didn't generate a response. If that happens and this option is selected, the listener sends an empty message. This option only applies if responses are used in this integration. The first thing we need to do is indicate which messaging model we are using: point-to-point or publish-and-subscribe. The next section describes how the listener connects to the topic or queue it is waiting on. Acknowledgement is a feature JMS provides to support reliable — in other words, guaranteed — message delivery. A message is not considered delivered until it has been acknowledged by the recipient. We could choose On Message Receipt, which means we acknowledge before calling the activity which processes this message, or After Message Processing, which means we will wait until the message has been processed by the service activity, in which case we send no acknowledgement if there was an error during processing. If guaranteed delivery is necessary, JMS attempts to re-deliver the message. In the publish-and-subscribe model, usually a consumer that subscribes to a topic only receives messages published after it starts listening. The publisher determines how long a message lasts before it expires, so when our listener starts, there might be messages already published and still present. 
If we want to be able to receive those messages, we check durable subscriber. Note that only some JMS providers support this feature. Check No Local Messages to filter out messages we sent. This allows us to use the same queue for messages and replies; otherwise, we'd receive our own responses as new messages. The Request Destination Name is the queue or topic the producer is sending messages to. This is the destination where we will wait for a message. Message Selector is a feature that allows the listener to filter out messages based on the JMS header or properties. Our inventory management system publishes inventory notices on behalf of a number of suppliers, and sets a property in the message called SupplierID. We want to receive messages only from suppliers we use, and ignore the rest. If we entered a message selector string "SupplierID=3 OR SupplierID=8" we would only receive messages relating to those suppliers. In the Response section, the Preference menu lets us indicate how responses, if any, will be sent for messages this service receives. For Preference we can choose Message, meaning responses should be sent to whatever destination is specified in the message's JMSReplyTo property. Use this option if our producer uses dynamic destinations, which are created as needed for each message sent. If we select Listener, the JMSReplyTo property in the messages is ignored and all responses are sent to the queue we specify in the Response Destination Name field. Here No Reply is selected because the publish-and-subscribe model doesn't support responses. Now let's look at how to create a JMS MDB listener. Message-driven beans work somewhat differently than the blocking thread model used by plain JMS listeners because they are running in the context of a container managed by the application server. The container monitors the queue or topic, and invokes the MDB when a message arrives. 
This provides better performance and a more flexible architecture, but requires configuration at the application server level, which non-MDB listeners do not. Much of the configuration of a JMS MDB listener is the same as a non-MDB JMS listener, so we'll just highlight the differences. On the Listener Properties tab, everything is the same other than the helpful reminder and that the fields related to the listener Java thread are not there. These fields aren't part of the MDB-based listener because it doesn't run in a thread. On the JMS properties tab, the differences reflect the fact that when using an MDB listener, the application server handles connecting to the request destination and retrieving the messages, so settings related to identifying and connecting to the incoming destination are configured at the application server level and are not here. However, it is still responsible for connecting to the destination for response messages we send. So for point-to-point integrations that require synchronous responses, we still need to identify the response destination and connection factory. The key difference is the Deployment Descriptor. Although we can configure the listener, it won't run until we deploy it to the application server. Details of how this is done vary between different application servers, but usually come down to editing XML files called deployment descriptors. If possible, it attempts to generate a full deployment descriptor file that we can use to replace the one we have. On some systems this isn't possible, in which case it generates XML fragments to insert in the existing deployment descriptor file. Click on the link to view the XML fragment. Async Processing Each connector running in parallel makes a copy of the step page and returns the results to its copy. Because of this, each connector needs a separate step page even if the connectors share the same applies-to class. 
If the same step page were to be used the connector finishing last would overwrite the results from the connector that finished first. The request parameters are set in steps 3 and 4. In steps 5 and 6 the Connect-SOAP method is invoked with the RunInParallel parameter selected. The Connect-Wait method in step 7 joins the current requestor session with the child requestors that were created. If the WaitSeconds parameter is -1, the current requestor waits indefinitely until the child requestors complete. A positive integer waits for the maximum number of seconds entered. It is not possible to catch exceptions or check the step status using the StepStatusFail when rule in the connector step transition for connectors that run in parallel, since the result is not communicated from the child to the parent. This also means that the error handler flow specified in the connector is not invoked. Instead, make sure the Error Handling section on the connector rule specifies properties for the status values ensuring that the values are set on the connector page in case an error occurs. When awakened, we need to examine the pyStatusValue on the connector pages for errors. In step 8 we copy the response data to the case using a data transform. In this scenario, two connectors were called in parallel. However, it would be possible to call the connector and perform any other type of task in parallel. In this flow a connector is run in parallel. The flow continues allowing the operator to capture data while the connector is being executed. Later the Connect-Wait is called and the parent and child requestors are joined. Named pages are not passed to child requestors, so do not use named pages in the data mapping on the Request and Response tabs. Because connector simulations rely on named pages it does not work for connectors configured to run in parallel. The parameter page is passed to the child requestor so it can be used in the data mapping for the request. 
However, the parameter page is not passed back to the parent requestor so it cannot be used in the response. In addition to being executed synchronously and in parallel, the SOAP, REST, SAP and HTTP connectors can also be executed in queue mode. Select queueing in the Processing Options section on the connector record's Service tab to configure queuing. When queueing is used, each request is queued then processed later in the background by an agent. The next time that the agent associated with the queue runs, it will attempt to execute the request. The queueing characteristics are defined in the connector's Request Processor. In addition to specifying the intention and request processor in the processing options section within the connector rule, within the calling activity the execution mode needs to be set to Queue when invoking the connector. If Run is selected the connector will execute synchronously. The queuing of the connection request is a database operation, which means that a Commit step is required after the Connect-* step in the activity, or in the parent activity that calls this activity. The name of the queue, how many times the associated agent should attempt to execute the request if it fails, and whether or not the results should be stored in the queue is configured within the connector's Connect Request Processor. The queue class defines the type of queue that is used. It is possible to use the standard class System-Queue-ExecutionRequest-Connect-Default, or to use a custom concrete class derived from System-Queue-ExecutionRequest-Connect-. When more than one queue is specified, when rules are required to determine which queue to use. The conditions are tested top to bottom so always leave the bottom row empty as the default queue. The Dequeuing Options tab contains instructions for the agent on how to handle requests stored in the queues. 
The maximum number of execution attempts specifies how many times the agent should attempt to process the request. Typically, we want to set this value to more than 1 so the agent can try again if the request fails the first time. We have the option to keep the item in queue after all execution attempts have failed or after successful execution. There is a standard agent called ProcessConnectQueue in the Pega-IntSvcs ruleset. The ProcessConnectQueue agent is configured to process items in the default connector queue class. For each custom queue that has been defined an agent needs to be created and configured to process items from it. Specify the custom queue class in the class field. The agent must be of Advanced mode. Specify the standard activity ProcessQueue to process items in the queue. The queue item instances can be viewed in the SMA by selecting Agent Management > System Queue Management. The queue item first gets the status "Scheduled". If the request executes successfully, the status of the queued item is set to "Success". If the request fails, the number of failed attempts is incremented and the request status is either set to "Scheduled" or, if the maximum number of attempts has been reached, "Broken-Process". If the status was set to "Scheduled," the request is re-queued for the agent. If the Connect Request Processor was not configured to keep items in the queue they are deleted after they are processed. For example, in our configuration items were not kept in the queue after successful execution, which means that there are no items in status Success. Asynchronous services might be an alternative for long running services where either the response is not required immediately or can be reattempted if the service, itself, has to wait for some event to occur.. The following describes how requests are processed when a service is configured to process requests asynchronously. 
An external application sends a request to a service named CreatePurchaseRequest. CreatePurchaseRequest uses the agent queue functionality to create a queue item for the request. The name of the queue, how many times the associated agent should attempt to execute the request if it fails, and whether the results should be stored in the queue is defined in its service request processor. The service queues the request with the status "Immediate" then spawns a batch requestor to run the request. The service returns the queue item ID to the calling application and ends the service call. If the request is executed successfully, the service page is populated with the results and included in the queued item, and the status of the queued item is set to "Success". If the request fails, the number of failed attempts is incremented and the request status is either set to "Scheduled" or, if the maximum number of attempts has been reached, "Broken-Process". If the status is set to "Scheduled", the request is queued for the agent associated with the queue. The next time the agent runs, it attempts to run this service. To retrieve a queued response the external application sends the queue ID to a service called GetPurchaseRequestResults as will be discussed. Configuration To configure a service such as CreatePurchaseRequest to run asynchronously, we need to configure the Processing Options on the service rule's Service tab. There are two options for queued execution. One-way Operation places the request on the queue and then quits. This option should only be used if the service either does not return a response or the response can be ignored entirely. Execute Asynchronously is the second option which we will continue to discuss. For queued services a Service Request Processor needs to be specified. Let's have a look at the Service Request Processor in more detail. The queue class defines the queue used. 
It is possible to use the standard class System-Queue-ExecutionRequest-Service-Default, or to use a custom concrete class derived from System-Queue-ExecutionRequest-Service. If we specify multiple queues we must use a when condition to determine which queue to use. The conditions are tested top to bottom so always leave the bottom row empty as the default queue. The Dequeuing Options tab contains instructions to the agent about how to handle requests stored in the queues. The maximum number of execution attempts specifies how many times the agent should attempt to process the request. Typically, we want to set this value to more than 1 so the agent can try again if the request fails. We have the option to keep the item in queue after all execution attempts have failed or after successful execution. This should be selected so the external application can call back and retrieve the results. The service page is stored as part of the queued item. Therefore, the service activity should write the result data to the service page so that it is available to map the response. The standard agent called ProcessServiceQueue in the Pega-IntSvcs ruleset is configured to process items in the default service queue class. For each custom queue that has been defined an agent needs to be created and configured to process items from it. Specify the custom queue class in the class field. The agent must be set to Advanced mode. Specify the standard activity ProcessQueue to process items in the queue. Configure the data mapping for the pxQueueItemID parameter on the Response tab for a SOAP, REST, SAP, HTTP, JMS, or MQ service rule as shown below or on the Parameters tab for an EJB or Java service. Note: If the play button on the service rule form is used to test the service, it executes synchronously. The service only executes asynchronously for external requests.
In addition to the service performing the actual asynchronous processing, a service to get the results of the asynchronous processing is also needed. In this case we have created a SOAP service called GetPurchaseRequestResults for that purpose. The page class is the same as for the first service. The standard service activity @baseclass.GetExecutionRequest is used to retrieve the service request data stored in the queue item for the request. This service must be configured to execute synchronously. Configure the ItemID parameter on the Request tab for SOAP, JMS, MQ, and HTTP rules or the Parameter tab for EJB and Java service rules. When the service creates its response, all data from the retrieved service request is on the clipboard and available to be mapped. If the external application needs the queue execution status (whether the request ran successfully) it is possible to configure a data mapping for the pxExecutionStatus parameter. It is also possible to configure a synchronous service to queue requests that fail for additional attempts. For example, an external application sends a request to a service that updates a case. If the case is locked an error is returned. In this case we can configure the service to queue the request for additional attempts based on a condition. The service returns the ID of the queued item to the calling application as part of an error message. Note that the service runs synchronously if it does not fail or if none of the conditions are true. The calling application must be configured to respond appropriately to either case. Thus, if it receives the information it requested from the first service it can continue with its processing but if it receives an error with a queue ID, it must call back to retrieve the results. This approach is useful if the error causing the service to fail is temporary and the response is not required immediately. 
To configure this, set the Processing options to specify that it executes synchronously and the Service Request Processor to use. Select the Queue When option on the Faults tab. Specify the when rule in the When Key field. If the when rule evaluates to true, the system returns an error with the specified data and the request is queued in the same way an asynchronous request would be according to the details in the specified Service Request Processor. If false, the next row in the list is evaluated. If all when condition rules return false, the normal response data is returned. If using SOAP and a when condition evaluates to true, a SOAP fault is generated. The string in the Map From Key field is used as the Fault String and the value of the pxQueueItemID parameter is used as the Fault Detail. The configuration is slightly different depending on the service type. Use Help to get the details for your specific service type. Inbound Email The Integration Email landing page helps us get an overview of email accounts and listeners available on the system. Select DesignerStudio > Integration > Email to open the landing page. Let's review each of the tabs. The Inbound tab displays a list of all email listeners on the system. Click on a row to view, add, or edit inbound email details. The Outbound tab displays a list of all email server connections on the system. For outbound email the system looks for an email account with a name that matches the classgroup of the work object. It uses the standard email account named Default if no match is found. The Email Accounts tab displays a list of all email accounts on the system. The Email Listeners tab displays a list of all email listeners on the system. In this section we will learn how we can use the Email Wizard (DesignerStudio > Integration > Email > Email Wizard) to configure our system to process incoming emails.
We can use the Email wizard to either configure an email account, only, or to create an email service that lets us create and manage work as well as configure an email account. If we only want to send email then all we need to do is to configure an email account. Instead, let's say we want to create an inbound email service that creates a purchase request for a purchasing application. The create work checkbox needs to be checked if new work objects are to be created by the email service. It also needs to be checked if email-based processing, such as approvals or other actions on a work object, is to be performed using an email reply. The organization is used for newly created work objects. Select the ruleset version in which you want to save the created rules. Next we need to configure the Email listener. Select an existing email account or enter the name of a new email account to be created. Specify the name for the Email listener and the folder the listener is going to monitor. If an existing email listener is specified, that instance is overwritten. Specify the service package to use or enter a name of one to be created. Next specify the Service Class. Select the operator ID and password that the service is to use while it runs. Select Disable Startup to deactivate the listener. In the next screen we need to configure the service package. The Processing Mode should be set to stateless if the services in this package can be executed by any requestor in a pool of requestors without regard to processing that the requestor performed earlier; otherwise, select stateful. Specify the access group for the service package. If Requires Authentication is selected, the service expects an Operator ID and password in the arriving request. Next we need to configure the email account. The email account details are pre-populated if we selected an existing email account. In the last screen we can review the inbound email configuration. Click Next to complete the Email Wizard.
332 The summary below shows the created records. Let's have a look at the records created. The wizard created an email service rule named CreatepyStartCase in the work type class. The Primary Page is set to the work type class and is named ServicePage. The Service activity is the standard activity called pyCreateAndManageWorkFromEmail, with the starting flow and organization parameters as specified in the wizard. 333 The Request tab defines the Message data mapping. The Email wizard maps the Message Header data to the page called pyInboundEmail on the work item. The Delivery Status Notification data is not mapped, we'll have a look at that later. The Message Data is mapped to the pyBody property on the pyInboundEmail page. It is possible to use parse rules to parse the email body. The Response tab defines the response that is sent back when the service processing is complete. If the email was successfully processed a "Thank you" email is sent to the sender. If an error occurs during processing an email with the issue is sent to the sender. The email account record holds the data required to access the email account and contains the data entered in the wizard. The email listener data instance contains information needed to route inbound email messages to an email service rule. It identifies the listener, the email account name, the name of the mail folder to monitor, the message format of the incoming messages, which email service rule to route messages to, and so on. Note that the listener needs to be manually started after it has been created. We just saw that the wizard created an email service with the standard activity Work- .pyCreateAndManageWorkFromEmail. The Work-.pyCreateAndManageWorkFromEmail activity both creates and manages work from the incoming emails. An email related to an existing case contains an identifier linking it to the case. We will look at this in detail in the next section. 
We can assume a case needs to be created if an identifier is not present. When an email without an identifier is received a case of the primary page class is created by the pyCreateAndManageWorkFromEmail activity using the starter flow and organization specified as parameters. The email data is mapped to the page called pyInboundEmail on the work item as defined on the Request tab. Let's try it out. Before testing the service with an email it is a good idea to check if everything looks good using the Run option in the Actions menu. If the test was successful we can go ahead and test it with an email. We can provide a subject and a body and even an attachment. A response was returned with the work object ID (PR-17) of the newly created case. The confirmation response is defined in the HTML rule Work-.EmailResponse and can easily be customized. Let's have a look at the case created on the clipboard. We can see the email data is available in the pyInboundEmail page. Email attachments are added to the attachments list for the case. If the email is not processed as expected try monitoring the inbox to make sure email messages arrive and are deleted. Make sure that the Keep Messages on Server option is cleared for the messages to get deleted. If messages are not deleted the Email Account might not be configured correctly or the listener might not be running. Check the log for any errors related to the email service. Use Trace Open Rule on the service rule to trace an incoming email to see how it is processed. Emails can be used for work processing in situations where a decision is required, such as a manager review. We can configure an email to be sent to the manager allowing her to either approve or reject the request directly from the email rather than having to login to the application. This functionality requires an email service as configured by the Email wizard. 
For example, using the standard service activity Work-.pyCreateAndManageWorkFromEmail with request parameters mapped accordingly. If you are using a custom service activity in your email service rule, make sure that this activity is called and the request parameters mapped. Email work processing requires that the ProcessEmailRequest agent in the Pega-ProcessEngine ruleset is enabled; by default it is disabled. We need to configure the parameters on the Notification section on the Review assignment to enable an email to be sent and automatically processed when returned. The following notification options are available. Name Purpose Notify Send an email to the specified party. NotifyAll Send a single email message to each party in the case. NotifyAssignee Send an email to the assignee. If the case is assigned to a workbasket an email is sent to the first operator listed in the contacts list. NotifyAllAssignees Send an email to the assignee. If the case is assigned to a workbasket an email is sent to all operators listed in the contacts list. NotifyParty Send an email to the specified party. In each case the email subject and correspondence name needs to be specified. Notify, NotifyAll, and NotifyParty allows us to send emails to parties rather than named operators on the system. The standard correspondence Work-.pyEmailApprovalActions is typically used as a template when creating a custom correspondence rule containing sufficient information for the manager to make a decision. The pzEmailActions tag causes the flow actions to appear in the outgoing email and must be available in any custom correspondence rule used. Here we just use the standard rule for demonstration purposes. 338 The outgoing email is attached to the case. The email contains two links, one for each flow action. The manager approves or rejects by clicking one of the flow action links in the email and sends the reply. 
If the case is locked when the approve email is received the sender is notified with an email telling them they can retry at a later time. It is the identifier in the email subject that links the email response to appropriate case and flow action. It might take a while depending on the Email Listener settings before the approval email is processed. The incoming approval email is also attached to the case. 339 Support Email Conversations. There is a standard Service Email rule called CreateEmailFlow that attaches a response message to the appropriate case. It uses the standard ProcessInboundEmail service activity. We need to configure an email listener to use CreateEmailFlow. If we want to create and manage work as well as support email conversations in parallel, separate listeners are required, or alternatively we need to combine the Work- .pyCreateAndManageWorkFromEmail and Work-.ProcessInboundEmail activities in one service. In addition to attaching the email to the case, the email is also forwarded to the party who sent the initial email. Email messages sent from an application can be bounced back for many reasons. The recipient's email address might have changed or been entered incorrectly, or the recipient's mailbox might be full. In such cases the outbound email triggers Delivery Status Notification or DSN messages. Additionally, outbound email messages can trigger Auto-Reply responses from recipients who are, for example, travelling or on vacation. If the email listener finds the string "AutoReply" anywhere in the message, it sets the DSN status code to 2.2.0 and maps the Auto-Reply text as the message body. DSN messages are ignored unless Process Delivery Notifications are selected on the Process tab on the email listener. If choosing to process DSN messages, it is a good idea to determine and implement a business process that performs error handling for DSN messages. 
For example, we might want to configure the email service to handle emails that were addressed incorrectly differently from those that triggered an AutoReply message. When an Email listener is enabled to process DSN messages, the DSN data is available and can be mapped in the Service Email rule. The standard page Work-.pyInboundEmail contains properties that can be used to map the DSN information. Pega 7 puts the case ID, correspondence ID, and subject into the message's Thread-Topic message header. If the message triggers a DSN, the Thread-Topic value is still intact. It is possible to map the values from the Thread-Topic header using a utility named parseThreadTopicHeader. The parseThreadTopicHeader utility processes the string in the Thread-Topic header and maps the values of the Thread-Topic header to the following properties: .pyInboundEmail.pyThreadTopic .pyInboundEmail.pyThreadTopicWorkID .pyInboundEmail.pyThreadTopicAttachID .pyInboundEmail.pyThreadTopicSubject The information in the DSN and Thread-Topic fields can be used to create a business process for investigating bounced messages. For example, if the mailbox is full, the case can be routed back to determine an alternative way to contact the recipient. If your Email Service is creating new cases we recommend either disabling DSN handling or implementing a business process that handles DSN messages since DSN messages might otherwise cause looping resulting in an infinite number of cases being created. REST The REST Connector wizard simplifies the process of integrating with REST services. The wizard walks us through the process of gathering the details of the service. The wizard then creates the records needed to interact with the external REST service. From the Designer Studio menu select Integration > Connectors > Create REST Integration to start the wizard. We are prompted to enter a URL for the REST service.
If we have a URL used previously to obtain a response from this service we can paste it in this field. In this particular example we want to integrate with a service that returns airport status and delay information in a JSON format. The wizard analyses the URL and suggests the elements that may represent parameters. Each resource path and query string element in the URL is listed individually. Resource path elements are assumed to be static by default. For resource path elements that are not static, and the value is treated as part of the request by the remote service, you should select Is Parameter as shown below. The system generates a property as part of the request data model and at run time substitutes that property's value for that part of the URL. The Endpoint URL at the top encloses each run time-set parameter's name in parenthesis. In this case we specified the AirportCode as a parameter. Query string parameters are always considered part of the request. A property is created for each query string parameter. If the service requires authentication, click the Edit Authentication link (below) to configure it. It is possible to configure a single authentication profile for the service or different profiles for each selected method. Typically GET or POST is used for obtaining data from a REST service, but PUT and DELETE are supported as well. 344 Use the Configure button to adjust the query string parameters recognized by this method. By default all methods will use the same parameters if we do not adjust the query string.. If we have a file that contains a typical response for the GET method, or, in the case of POST and PUT, a typical request, we can upload that file here. This is used to generate the request/response data classes and properties as well as the XML parse and stream rules to map the data. We are expecting a JSON response rather than XML so it is not applicable for us. 
The Test button allows us to verify that the service is accessible and returns the expected response. Provide any parameters necessary and adjust the authentication details if required. 345 The response message can be viewed in either Tree or Raw format for JSON and XML. If we are testing the POST or PUT method we can use the tree view to configure the body of the request. In step 4 we need to provide the integration class, connector name, and ruleset. 346 We also have the option to create a data layer. Selecting this creates a data class and data page. Clicking Create generates the integration. We can use the Undo Generation button to remove the records that were generated. Select DesignerStudio > Application > Tools > All Wizards to see a list of all wizards. Use this page to complete a wizard in progress or undo the generation of a completed wizard. 347 Let's have a look at the generated records. Classes and properties are created to hold the request and response parameters. Below the base class, MyCo-Int-Airport, is the AirportStatus class. This class holds the request and response properties for the method in the service as well as the connector and, if mapping is required, holds mapping rules, as well. We can see that the request and response are represented as classes. The query string is also a separate class. Let's have a look at the Connect REST rule. The Service tab was populated with the information entered in the wizard. On the Methods tab only GET is configured. The request query string parameters are mapped to a property. 348 The same is true for the response message data. 349 Configure a REST Service In this example we want to create a REST service that returns the details of a purchase request, with a given ID, in XML format. Similar to other service types the following records need to be configured: REST Service Service Activity XML Stream for the response Service Package Let's start by having a look at the REST service record. 
The primary page class is the purchase request case type. Here we have defined the ID input parameter as a resource path parameter. The Methods tab allows us to specify the service activity and request/response mapping for each method type. This service uses the GET method only. We haven't specified any header or query string parameters. Alternatively, we could have specified the purchase request ID as a query string parameter as opposed to a resource parameter. 350 The service activity loads the purchase request data into the service page. The response is mapped using an XML stream rule. The XML stream rule assembles the purchase request data for the message. 351 Finally, let's have a look at the Service Package. We can see that our GetPurchaseRequest REST service is listed. Test the service using the Run option in the Actions menu to verify that it works as expected. The REST service is now available to be called by external systems. It is accessible under the following URL: cePath 352 Package, class and method name is the three-part key of the service rule. The resource path is as specified in the service rule.. The password must be base64-encoded. Here is an example a URL for our REST service: urchaseRequest/PR-21?UserIdentifier=Service@ADV&Password=cnVsZXM= Distributed Transactions To participate in two- phase commits Pega 7 needs to be deployed as an enterprise application using an EAR file. In addition to deploying Pega 7 as an EAR, we also need to make sure that the application server uses a JDBC driver with XA (Extended Architecture) support. XA support allows database drivers to handle transactions against different data-stores such as databases, application servers, message queues, and others in an atomic fashion. XA uses a two-phase commit to ensure that all resources either commit or rollback the transaction consistently. Let's look at couple of examples where distributed transactions make sense: Data is saved in more than one database. 
For example, a case created in Pega 7 saves data to an external database (not part of the Pega 7 schema) while case history and assignment information is saved to the Pega 7 database. If the update to the external database fails, then the history and assignment records must be rolled back. Pega 7 uses multiple resources, JMS and database, in a single transaction. In the same example above, Pega 7 saves the history and assignment information in Pega database and sends the request to update the external system using a JMS queue. Despite deploying Pega 7 as an EAR application, there are a few limitations where interactions cannot use a two-phase commit. We cannot use a two-phase commit when: 1. Database-write operations use SQL queries instead of Obj- Methods. 2. A connector uses a protocol, such as SOAP or HTTP, which does not provide transactional support. 3. Multiple connector calls execute in parallel. Multiple parallel connectors cannot participate in a two-phase commit since running in parallel creates child requestors. There are two types of transactions based on which resource is responsible for starting and committing the transaction: Bean-Managed Transaction (BMT) Container-Managed Transaction (CMT) For BMT, Pega 7 directly uses JTA (Java Transaction API) to begin the transaction and commit or roll back changes. Pega 7 is responsible for the transactions and this is the default state of PEGA 7 system when deployed as an enterprise application (EAR). For CMT, the application server, also called the container, is responsible for the transaction and Pega 7 participates as one resource. Which scenarios use BMT and which ones use CMT? When Pega 7 is accessed by users through a web browser it must use BMT. PEGA 7 service requests can use both BMT and CMT. Transaction Boundaries The boundaries of a BMT are different from those of a CMT. 
To illustrate the boundaries of a BMT in Pega 7, let's consider an assignment which requires a database write to two different databases, called DB1 and DB2. A transaction starts when the commit for the assignment is triggered, meaning when Pega 7 starts writing the deferred write operations to the database. A transaction ends when the commit or rollback succeeds. Both write operations must succeed for either to succeed; if one fails the other also fails. One thing to note in this BMT example is that the database / JTA transactions occur within the scope of the assignment. A subsequent assignment or any other subsequent business operation can then rely on the results of the database write operations. Now let's explore what happens in the case of CMT. Let's look at another example where an EJB service rule is configured to participate in CMT with PEGA 7 deployed and configured accordingly. The service processing requires two write operations to two different databases as in the previous example. In this case, when the EJB service rule is finished marshaling its response, the write operations are submitted to the database resource manager. The operations are committed if all parts of the distributed transaction succeed. If Pega 7 throws a runtime exception or any other participant in the distributed transaction has a failure, all operations are rolled back. The transaction starts outside of Pega 7 and ends outside of Pega 7. This means that no business operation that is within the transaction, yet occurs after the EJB response is sent, can rely on the results of the database write operations from the same transaction. If using CMT, a Pega 7 application must be designed accordingly. In the above example, if the service were to create a new case, the case will be transient until the container commits the object. Any subsequent actions that need to be performed on the case must be performed in a separate transaction once the case is actually created.
All data operations such as the Commit method and the WriteNow parameter of the Obj-Save and Obj-Delete methods are simply ignored. Pega 7 services using EJB, JAVA, or JMS protocols can participate in both BMT and CMT. Pega 7 services use BMT by default and we need to make additional configuration changes to use CMT. JAVA Services: Service Java rules are packaged as JAR files by using a service package data instance. This JAR file will then be integrated in the class path of the external system for it to access the Pega 7 java service. The external application that uses this jar file can manage the transaction in which the JAVA services participate. To do this, update the PegaRULESSession.properties file in the JAR file and add environment entries to it for each of the service methods. For example, to set the transaction attribute for a service method named createPurchaseRequest to required, the environment entry will look like this: TXN_ATTR_FOR_METHOD_createPurchaseRequest=REQUIRED The transaction attribute specified for the service method tells the PRService EJB which of its invokeService methods to use when routing a request to the Java service rule. Then, the PRService EJB uses its method invokeServiceTxnReq to invoke the createPurchaseRequest service rule. EJB Services: Similar to Service Java rules, Service EJB is also deployed as a jar file which serves as a proxy jar to access the service rules from the external system. The external system accesses the service rules as though it is a business method of a session bean (EJB). We need to modify the generated ejb-jar.xml file so that the container in which the proxy is deployed manages the transactions of the EJB services. In the ejb-jar.xml file we need to configure transaction attributes appropriately for the methods (service rules) that the proxy represents. 1. First, we must change the transaction type from "Bean" to "Container" in the enterprise-beans section. 2.
Then we add the environment entry for each of the service methods, in this case we added for StartPurchaseRequest. The value of the env-entry-value tells the PRService EJB which of the invokeService methods to use when routing a request to that EJB service rule. 3. Lastly, we add the assembly-descriptor section with the container-transaction entry for each of the service methods. The value specified for the env-entry-name and the method-name value in the assembly-descriptor section must exactly match the service method name which is the third key of the service rule. When Pega 7 is deployed as an enterprise application, JMS MDB-based listeners are available to be deployed as application components and can be managed by the application server. The MDB listener routes messages to the JMS service rule. We need to create a new JMS MDB listener data instance in Integration-Resources category. This instance can determine whether we want messages to be redelivered if something goes wrong while they are being delivered or processed. To do so, select the container-managed transaction field on the Listener Properties tab of the object. When the container-managed transaction field is selected, the MDB starts a transaction when it gets a message from the queue and the service processing participates in that transaction. If there are problems either with the message delivery or with the service processing, the message is delivered again so the service can attempt to process it again. When the container-managed transaction field is cleared, message delivery and service processing occur outside of a transaction. If the service processing fails, the message is not redelivered. PEGA 7 connectors can participate in a distributed transaction. However when the connector is accessed in a user session (the most common use case) it can only be managed by BMT. CMT can only be applied when connectors are launched by Pega 7 services such as Service EJB or Service JMS. 
When a connector runs in a user's requestor context, transactions are bean-managed. In this case, Pega 7 starts the transaction when the commit is triggered and ends it when the Commit or Rollback succeeds. Therefore, when the connector is invoked, there is no transaction open for it to participate in. Whatever processing and database writes occurred within the external system occurred separately from any processing in Pega 7 and therefore cannot transactionally depend on a commit operation from the application. On the other hand, if the service processing for an EJB, Java, or JMS service invokes a connector, the connector runs in the service's requestor context. Therefore, if the service participates in a container-managed transaction, the connector also participates in that same container-managed transaction. When using a JMS connector in a CMT context, remember that the JMS message is not actually delivered to the JMS service provider until the container managing the transaction issues the commit. This means that the message is not delivered until the container-managed transaction is completed. Therefore, having a JMS connector waiting for a response when running in a container-managed transaction would hang the session at runtime until Pega 7 times out and ends the server interaction. Architecture In contrast to traditional Java applications, PRPC based applications are not deployed as enterprise archives (EAR) or web archives (WAR). Whilst PRPC requires deploying an EAR or WAR, the ear/war has minimal functionality and does not contain any business specific logic or features. An application developed on a PRPC platform consists of a set of process flows, UI components, decisions, declaratives, service levels, activities, etc. - collectively known as "rules". At design time, these rules are created using rule forms and are stored in the PegaRULES database. When a rule (flow, UI, service, etc.)
is first called at runtime, the PRPC engine executes a sophisticated 'rule resolution' algorithm to find the most appropriate rule version based on rule name, class inheritance, ruleset, application version and rule circumstance and then loads the appropriate rule definition. Note: Refer to the glossary for additional information about rule circumstance. Many of the PRPC rule types are executed at run-time as java classes and methods. PRPC 'assembles' executable java as needed by generating and compiling java, then storing the class files back into the database table named pr_assembledclasses. When the same rule is accessed again, it reads from this database table until the rule changes. The run-time PRPC environment includes 'engine' and 'application' classes. The core PRPC engine is implemented through Java classes. Starting with PRPC v6.1, most of the PRPC 'engine' is actually loaded from the database table named pr_engineclasses. Storing the engine in the database provides the ability to apply engine maintenance without redeploying any files on the application servers. PRPC utilizes custom Java class loaders. The custom loaders enable PRPC-based applications to be changed in a live system without having an outage or the need to redeploy. PRPC applications are typically hosted inside an application server and accessed by human users via browsers or by partner applications such as a service. PRPC facilities can also be directly embedded in Java applications (JSR- 94 API). Selected PRPC features and facilities may run from command-line java scripts that are distributed with PRPC. Applications developed on the PRPC platform, require two main systems for its operation. 1. A Database Server containing the rules 2. An Application Server that contains the PRPC rule engine To setup a PRPC system, the administrator needs access to these two systems. The database server is used to store the following objects: 1. 
Transactional data, such as case instances (often referred to as "work objects" or "work items") that are created during runtime. Case history, case attachments, and work assignments are examples of work objects created during runtime. 2. Rules that comprise applications that the PRPC engine uses to generate the application code at runtime. It also includes the rules from the Pega rulesets that make up the PegaRULES base application. 3. Reference data instances required for processing a case. 4. The core engine Java classes. 5. The assembled application Java classes. PRPC generates database schema during installation, which we discuss in detail in the Installing PRPC lesson. PRPC can generate database schema specific to different vendors, namely Oracle, DB2, SQL Server, Postgres and so on. This schema can be directly applied to the database during installation or can be given to the DBA to create the database after the installation is complete. In Pega 7, the PRPC database supports split schema (two separate schemas to store rules and data instances respectively). We will learn more about database tables and their structure in the Database Architecture lesson group. In addition to creating a new database, the administrator must also deploy three PRPC application archives as Java Applications in the application server. One of these archives is the actual PRPC engine, which is bundled as an EAR (Enterprise ARchive) or as a WAR (Web ARchive). The choice of implementing PRPC as an EAR or a WAR is usually decided by the enterprise architecture design team and in some cases by the project sponsor. PRPC can be deployed as an EAR on IBM WebSphere, JBoss, Oracle WebLogic and so on. When using Apache Tomcat, PRPC can be deployed only as a WAR. Enterprise-tier deployments support Java capabilities like JMS Message services, two-phase commits, JAAS and JEE Security, etc.
PRPC supports heterogeneous application deployment using both WAR and EAR engine deployments within same environment if there is a need for platform-specific services on particular nodes. In theory one could use a different model for each environment (WAR/Tomcat for Dev and QA, WebSphere/EAR for production) but this is not recommended. In general, EAR should be used when you have cross-system transactional integrity (two phase commits), JEE security requirements or enterprise standards for applications to be distributed as enterprise beans. WAR can be used in the absence of such requirements and when simpler configuration and operations are desired. PRPC ships with three main application archives that have to be deployed in the application server (if using EAR deployment) or in the servlet container (if using WAR deployment). 1. Pega Engine is shipped as prweb.war or prpc_j2ee.ear (Note: PRPC ships several vendor specific EARS). The archive specifically contains the classes that bootstrap PRPC and start loading classes from database. PRPC ships with some application server specific EAR versions, refer the install kit and the installation guide to find the file that needs to be deployed. The installation guide provides specific instructions on how the ear or war can be deployed in the application server. Deploying this application archive facilitates users to access PRPC using the URL http://<servername>:<portnumber>/prweb/PRServlet . 2. System Management is the system management tool that we learned about in the previous lesson group must be deployed. The System Management Application (SMA) can be configured to monitor one or more PRPC nodes. SMA helps administrators and system architects to monitor and control agents, requestors, listeners and other processing. Deploying this application archive enables administrators to access the system management tool using the URL http://<servername>:<portnumber>/prsysmgmt . 3. 
PRPC Online Help provides excellent contextual or inline help on all rule forms and most of the dialogs. To access this help, we need to deploy the prhelp.war. Help is shipped only as a WAR file and can also be accessed using the URL http://<servername>:<portnumber>/prhelp . The actual steps to deploy EAR and WAR vary by application server and are typically done using the administrative tool provided by each application server. In the case of tomcat, we use the Tomcat Web Application Manager to deploy these archives and this is how it looks after the archives are deployed successfully. SMA and Help are usually deployed on a single node in a multi-node clustered PRPC environment. A single SMA can be used to monitor multiple PRPC nodes. SMA and Help are accessible from the browser using the URL or can be launched from the Designer Studio. The URL's for SMA and Help can be defined using the 'dynamic system setting' records in the PRPC database. We will learn about this specific dynamic system setting in the next part of this lesson. As part of PRPC deployment it is necessary to set the SMA and help URLs by using the System Settings Landing page, which is accessed in the Designer Studio by selecting System>Settings>URLs. To access the inline help on all the rules form and to access the help menu, we must set the help URL in the settings page. We must set the SMA URL in the settings page if we want to access the System Management application from the Designer Studio. In addition to deploying the PRPC application archives, we must also configure the HTTP port and a JNDI data source for the PegaRULES database in the application server. (This differs forapplication servers, so refer to the administrator guide of the application server for these details). For WebSphere, WebLogic and JBoss this can be done using the administrative console. In Tomcat, we will need to edit configuration files including server.xml and context.xml located in the /conf directory. 
Please refer to the PRPC Installation guide specific to your target environment for details. This diagram shows the PRPC components and how they fit in the JEE environment when deployed as an enterprise application. The application server that is shown here is not really a PRPC component but instead the environment or platform on which PRPC is deployed. This is a third-party application. As of now, PRPC is compatible with the following application servers: Apache Tomcat which is a free web container. Oracle WebLogic, IBM WebSphere, and Red Hat JBoss, all three being enterprise application platforms. Within the application server, one of the key components is the "web container" which hosts the two main PRPC archives - "prweb" and "prsysmgmt". prsysmgmt war uses Java Management eXtension (JMX) technology to access those PRPC facilities, represented as MBeans or Managed Beans and hosted inside the entity called MBean Server. Another key component within the big box is the "EJB container" which provides common functions, such as transaction support through the EJB engine and the connection to other enterprise information systems through the resource adapter. The EJB engine is hosted by PRPC prbeans.jar which also provides all EJBs support including the eTier engine EJB. The PRPC resource adapter is implemented through the pradapter RAR (Resource Adapter aRchive). Engine EJBs are stateless EJBs providing an interface between the PRServlet in the Web container and the Rule Engine container in the EJB container. Rules Engine Container is a single container which does all processing and contains static content and data (stored as clipboard pages). Processing in the Rules engine container can be Bean managed or container managed transactions. The database EJB handles secondary transactions such as locking and caching, this EJB uses bean-managed transactions since direct access to the database is required for each transaction. 
The diagram also represents other JEE frameworks. Let's review them one at a time. 372 The first framework is JAAS or Java Authentication and Authorization Service. It is a java security framework that provides, for example, a representation of user identity, a login service and a service that tests whether a user was granted a specific permission. Another framework is JDBC or Java Database Connectivity. PRPC uses JDBC for all database interactions including persistence. JDBC enables java-based applications to execute SQL statements. Next is the JTA or Java Transaction API. It allows JEE components to participate in distributed transactions within the confines of a Transaction Manager. With the EAR deployment, JTA allows the support of two-phase commits as long as the database driver is an XA-type driver. The JMS server plays an important role in transparently handling many aspects of message pulling or pushing through a message queue or a message topic area. When deployed as a WAR file, PRPC only supports a JMS Listener whereas when deployed as an EAR file, it is possible to implement a JMS MDB Listener. PRPC leverages other JEE frameworks which are not represented in this diagram but are equally used in both deployment models. JNDI and JavaMail: JNDI stands for Java Naming and Directory Interface. In an application server, a JNDI is defined like a tree whereby you add nodes and refer to the nodes rather than referring to what the nodes point to. This allows named java objects of any type to be stored and accessed. We will encounter the use of JNDI in PRPC when creating Data-Admin-DB-Name instances for storing connection details of a database or when creating connection details in integration rules. JavaMail is another JEE API. It supports POP and IMAP protocols only and, as a result, PRPC supports only those two protocols. JavaMail is leveraged during Correspondence rules execution. 
While PRPC applications usually are entirely self-contained and maintained inside the PRPC database, PRPC has three configuration files local to each JVM configuration that often require some degree of modification or customization by application server administrators. These files are: 1. prconfig.xml - PRPC server configuration file. Administrators must modify this file if they want to 'classify' a node (web, batch, agent, and listener) tune cache sizes, set alert thresholds or other behaviors specific to a single jvm (node). 2. prlogging.xml - logging configuration file. Administrators modify this file to implement log rolling, add custom log files, configure settings for integrating with PRPC 'autonomic event services' and 'diagnostic cloud' monitoring systems and to configure log filtering if required. Again editing this directly must be done only when behavior is specific to a single node. 3. prmbeans.properties - the 'mbean properties' file controls the facilities available to the 'management bean' interface used by the pega-provided system management application or any custom JMX scripting. By default, there are restrictions on certain operations, like viewing user session business data on the clipboard. It is quite common to edit the mbean properties to lift such restrictions in the pre-production environment to provide better centralized debugging capabilities. Editing prmbeans was covered in detail in the system management application lesson. Now, let's see how to access these files to make modifications. The files for WAR deployment in a tomcat server are located in the <tomcat-install- dir>/Webapps/prweb/WEB-INF/classes directory. The files for the EAR deployment are located in <InstallDir>/APP- INF/lib/prresources.jar. [prresources.png] To access the file we need to extract prresources.jar. After editing the files, we need to package them into prresources.jar and then package prresources into the EAR file. 
After packaging the EAR, we need to redeploy it. PRPC allows administrators to override default configuration file names and locations without having to package or redeploy PRPC. Follow one of these options to change the configuration and logging options. 1. Define JVM Custom Properties - In WebSphere we can define custom properties for accessing the prconfig.xml and prlogging.xml files. In other application servers, the custom property can be directly added in the startup file by using the syntax -Dpegarules.config=/<path>/prconfig.xml. 2. Set User.Home parameter - When a default value is set for this parameter in WebSphere it instructs PRPC to look for the prconfig.xml and prlogging.xml files at this location. User.home also works in JBoss. 3. URL resource references - Some application servers, like WebSphere, offer an elegant way of accessing them using URL resource references. In EAR implementations, these are modified by changing url/pegarules.config and url/pegarules.logging.configuration. Refer to the WebSphere Administration Guide to see how to access these reference URLs. Dynamic System Settings Starting in PRPC v6.2, most of the PRPC server configuration settings can be stored in the PRPC database as instances of the dynamic system settings class (Data-Admin-System-Settings, or DASS). To simplify administration and ensure consistent behavior across servers, we recommend that configuration settings be set using Dynamic System Settings (DASS) instead of editing the prconfig.xml. The entries made using DASS are stored in the database table named pr_data_admin. By storing these entries in the database, we avoid modifying the configuration settings across multiple nodes in a clustered PRPC environment. Dynamic System Settings can be added using the Designer Studio. Dynamic System Settings are in the SysAdmin Category and are accessed using the Records Explorer since the settings made apply to all applications across the system.
Not all settings are shipped as DASS in the product and we need to create DASS only when it is required to overwrite the default value. Configuration Categories There are several configuration categories that we can configure using DASS. These categories are: 1. Alerts which contain configuration settings to set alert thresholds. If the threshold value is exceeded, the system writes an entry into the PRPC alerts log file. a. Database operation time threshold can be set by defining a new DSS as shown below. The setting purpose has the following value prconfig/alerts/browser/interactionTimeThreshold/warnMS. By default this value is set as 1000ms (1 second). After entering these values, click Create and Open to display a new rule form. Then enter the value in milliseconds (3000). After this change, PRPC writes an entry to the alert file only if the time exceeds 3 seconds. 1. ProComHelpURI is the help URL which must be set using DASS in a clustered PRPC environment. Owning-Ruleset: Pega-ProCom Type: String Value: 2. SystemManagementURI is the SMA URL which must be set using DASS in a clustered PRPC environment. Owning-Ruleset: Pega-ProCom Type: String Value: 3. SearchSOAPURI helps in identifying the URL of the node where the indexing is enabled. In a clustered PRPC environment, all nodes which do not have the index files connect with the indexed node through a SOAP connection. Owning-Ruleset: Pega-Rules Type: String Value: Refer to the PDN link- prpc-62 for additional categories and information. Class Loading PRPC applications may need to leverage non-Pega external Java classes and jar files to support PRPC implementations. The external java may be from infrastructure or interface vendors, such as IBM MQ Series java classes, or from custom java applications. These external classes may be referenced directly in rules that support Java. 
PRPC must have access to the appropriate java at 'compile time', when the rule is first assembled, and at run time, when rules are executed. All application servers provide a standard 'library' directory (lib) for the server to automatically load java classes on behalf of PRPC applications. When a PRPC application is loaded, the 'boot loader' logic in the WAR/EAR needs to access the database. To access the database, the application server must load appropriate JDBC drivers from the library directory. To access vendor or custom Java in applications, the class must be both loadable and accessible to the internal Java assembly/compilation facilities. While the application server automatically loads classes from the lib directory, Java assembly requires that each class or jar file be explicitly passed in compiler arguments. To make an external class visible to the PRPC compiler, it must be added to the compiler class paths system setting. defaultClasses: To add additional class files, a semicolon is added at the end to add a new entry. Either a period or forward slash can be used to separate the name of the classes. defaultPaths: This is to include the location of the jar file that is placed in the application server, since the entire path is stored, the file can be placed in any directory. If using a different directory, we need to make sure that the jar is placed in the same directory on all nodes in a clustered PRPC environment. To simplify server administration and configuration, it is possible to load external java code other than JDBC drivers into the PRPC database directly, and let PRPC manage class loading and class compilation. All external java loaded into PRPC is automatically recognized at system startup and does not require explicit listing in the compiler class path argument. To load external java into the PRPC database, one may directly import class and jar files from the Import gadget in Designer Studio. 
Once imported, the external java may be distributed as part of PRPC "product" rule and data bundles. If we do not want to stop and start the application when adding new classes, we can use SMA to refresh the classes. This can be done by navigating to the Advanced section and click the Class Management link to load that page. In the page, click the Refresh External Jars button to refresh the classes. DB PRPC works on most leading databases such as Oracle, SQL Server, IBM DB2, PostgreSQL and so on. PRPC installation comes with a database schema that stores information on rule, data and work instances in tables. It is expected to use these tables as-is except for the work and custom data classes built for the applications. By default, the New Application wizard creates a custom mapping table for all work classes created in PRPC. Similarly wizards such as the Live Data and the optimization wizards create custom mapping tables for all data and indexed page properties. 379 Persistence Model The PRPC persistence model is designed to work similarly to the Object Relational Mapping (ORM) tools such as Hibernate. A PRPC class is mapped to a database table and the instances of that class are stored as rows in that table. The data gets saved in the pzpvStream column as BLOB Data, properties which have to be exposed are marked for exposure and the table structure can be changed which results in those properties persisted as columns. Not all PRPC classes get stored in database tables. Only tables that are marked as concrete classes and ones that have instances are mapped to tables. Both Work and Data classes get mapped to tables, however they are mapped differently. Work table mappings work on the basis of the work pool they belong to; each class group is mapped to a database table and all work classes that belong to that work pool (inheriting from the class group) gets saved in that table. Similarly Data classes can be mapped to database tables. 
Data classes can also be mapped as external classes, the primary difference being the external classes do not have Pega specific columns such as pzInsKey, pxpvStream and so on. The BLOB that is stored in the pzPVStream column of the table is obfuscated and is zipped to save space. The obfuscation is proprietary to Pega. For example, an aggregate such as a Value List property can have multiple values. When the system saves an object that includes an aggregate property, its values are compressed together (or "deflated") into a single column. When the instance is later opened and placed on a clipboard, the column is decompressed (or "inflated"). When deflated, the property names and values are present in a single text value. This text value has a proprietary format; the values are obfuscated. The newer versions of PRPC support split schema consisting of a rules schema which includes the rule base and system objects and a data schema which includes the work tables and data objects. Split schemas are mainly useful in performing PRPC upgrades to rule tables without bringing down the server. 380 Connection Pooling Software pooling eliminates JDBC overhead. Further, object pooling also helps to reduce the garbage collection load. PRPC can leverage the JNDI data source defined as part of the application server or the servlet container (in case of tomcat) for the database connection details. While installing the system, it is highly recommended that we set the connection pool size in the application server to make sure users are not waiting to acquire database connections. The PRPC application uses caching which limits interaction with the database. However each submit requires a DB connection to update the work item. Careful introspection of alert log files helps to set the connection pool size. PRPC also functions if we specify the database connection properties in the prconfig.xml, but this should not be used even when PRPC is deployed in Tomcat. 
In Tomcat the database connection should be defined specifically for our application by defining a resource definition in the context.xml file. In PRPC we need to create a Database Table data instance to map a class to a database table. Database Table is associated with the SysAdmin records category. A Database Table instance maps to a schema name (in this case, PegaDATA) as well as a table name. The PegaDATA and PegaRULES database instances are created by default; we need to create a new Database record if additional databases are used. Caching PRPC uses several caching mechanisms to enhance application performance. Below is a list of PRPC caches. 1. Rule Instance cache - stored in memory 2. Rule Assembly cache - stored in memory and database 3. Lookup list cache - stored on disk (Server File System) 4. Conclusion cache - stored in database and memory 5. Static Content cache - stored on disk (Server and Client File System) 6. Declarative Page cache - stored in memory 7. Declarative Networks cache - stored in memory Most caches are stored in memory except a few that use the file system or database. If caches are primarily built to restrict accessing rules from the database each time, why do we save some of these caches in the database? We will learn about the significance of storing caches in the database when looking at the Rule Assembly in detail. Cache content is automatically controlled by the engine and PRPC initializes the cache using default values for all caches. The default values are usually a good starting point; however, each application is unique and it's hard to come up with a number that can work for all applications. It is extremely important to conduct some load testing to simulate real-time performance. This helps in understanding which settings need some adjustments.
In general, it is highly recommended that you work with the Pega Support team to fine-tune caches, but let's learn about the various caches, what the default values are for each cache and how to modify these values if required. When a rule is requested, the system searches the Rule Instance Cache to find the rule. The cache returns one of three things: Cache Hit - Means that the rule data is stored in the cache Cache Miss - Means that the rule has not been requested before, check the database Not found - Means that the cache has registered that the rule was not found in the database When a "cache miss" is returned, the system then requests the rule from the database and, if found, returns the rule. To improve efficiency, the system does not record the rule in cache until the same rule has been requested multiple times (three and in some cases more than that). The system keeps track of the number of times a rule is requested and keeps the entries in a probationary state, so even if the rule is recorded as found in the database, the system makes requests to the database every time until it has been accessed multiple times. When the rule gets updated, the system checks the cache for any rule related to the changed rule and invalidates those entries, so that subsequent requests for this rule go to the database and get the updated rule information (rather than getting the outdated information from the cache). The rule cache has two distinct parts. First, the "alias cache" which captures rule resolution relationships and provides the pzInsKeys of the candidate rules. The cache is structured as a hashmap with the key assembled from a hash of the requestor's RuleSet list, the rule class, the applies-to-class of the rule and the rule purpose, which is basically the rule name. As structured, multiple keys may link to the same exact rule.
Now that the system is able to access the candidate pzInsKeys through the alias cache, the instance cache which is the second part of the rule cache, allows us to link the pzInskey to the content of the associated blob. The instance cache captures the blobs from database and stores the XML format in memory. The rule cache can grow substantially and take upon a growing size of the total memory available for the system. To prevent this, we would invest time in tuning the cache size. Rule Instance cache details can be viewed in SMA by expanding the Advanced section and then clicking Rule Cache Management. The Rule Cache Summary section lists the instance count (this is the same count that we saw on the Memory Management screen). This section also provides additional details for the cache hits (rules found), cache misses (rules not found) and so on. The SMA can also be used to tune rule cache size. Rule Instance cache sizing is extremely important. By default, the rule cache is set to 3000 entries. We can change this limit by using dynamic system settings. We can create a new dynamic system setting (DASS) with the Setting Purpose defined as: prconfig/cache/instancecountlimit/default We do not want this number to be high or low and PRPC allows tuning only by setting the instancecountlimit, value which can be determined using the procedure described below. 1. Developers should start by testing all the important parts of the application- create, modify and resolve cases, run the most commonly used reports, make sure all the agents that are used in the application are enabled and execute without any errors. Login as different operator profiles and so on. It is important for the user to run all the rules. Doing all this will make sure all the rules are preassembled. 2. Now, clear the Rule Cache using the Clear Cache button in the Rule Cache Management page in the SMA. 
After clearing the caches, repeat testing all the important parts of the application and then go to the Rule Cache Management page in SMA and check the instance count. 3. If the instance count comes out to be anything significantly higher or lower than 2000, then modify the instancecountlimit using DASS. Set the instancecountlimit to 1.5 times the instance count you noticed in the testing. In a multi-node clustered environment all nodes should use the same value except if a node is designated to serve a specific purpose such as processing only for agents or services. Important Note: Apart from the tuning procedure, the Clear Cache button should not be used at all because clearing the Rule cache slows the system down. This cache was created to provide efficiency in rule lookup; clearing the cache means that all the cached information will have to be rebuilt. Setting size is critical since setting a value too high is not good because too many rules gets cached taking up more memory than required. Setting a value too low requires the system to access the database frequently. PRPC also improves rule cache efficiency by using two types of mechanisms to clear entries in caches, the first one is pruning and the second one is draining. Pruning is triggered when the instancecountlimit is reached. The system uses the Most Recently Used (MRU) algorithm to delete the old ones to make room for the new ones. Draining happens on an ongoing basis, every time a rule is read from the database and added to cache, the oldest cached item greater than the minimum age threshold is removed from the cache. Rule cache should never be cleared unless lots of pruning and draining happens. At times it is harder for developers to predict and simulate the correct size for a production system. In this case, if performance testing indicates a lot of cache misses, the administrator might need to perform the steps above in production to estimate the cache size. 
When accessing the Rule Cache Management page in the SMA, pay close attention to the Instance MRU section. The Limit Size is what is set using dynamic system settings. The target size and max size are computed based on the limit size. The current size reflects the number of rules currently cached. Amongst the pruned counts in the second line, MaxPruned should never be greater than zero. Cache tuning exercise should happen in this case. DrainingPruned displays the number of rules that are getting pruned, once the current size exceeds target size. LimitPruned appears when the current size exceeds limit size. This should not occur, and when it does occur it makes sense to increase the limit size before max pruning occurs. The Instance MRU (Most Recently Used) section displays the MRU details, pruning and draining. These are used by PRPC to remove the entries in cache. We will learn more about this in the next section. There are three types of reports offered by PRPC. We can click the report button which gives the option to export the cache results to a csv file. The Instance Cache report contains information about the cache status (hit, miss, etc.), rule aliases, cache hits, hits per second, estimated size, added (date and time), accessed (date and time) The Rule Resolution report contains details on Rule Resolution Alias keys. A rule resolution alias consists of the class name, the rule identity and a hash of the user's ruleset list. When the Alias is cached, each of its candidate instances (narrowed by rule resolution logic) should be in the Instance cache entry, if it does not exist, it creates an entry and mark the status as Preloaded. The Rule Identities report contains information such as the rule identity name, rule name, rule class, ruleset list and requested applies to class. We can get cache information for a specific rule type or a particular rule by entering them in the text boxes above the rule cache summary.. 
Rules Assembly is the process whereby Java code is generated and compiled for all of the rules run by the Pega 7 system that require it. It provides access to the constructor of the generated Java class. Since Rules Assembly is an expensive process (in terms of system resources), the process has been broken down into these four steps to maximize performance and efficiency. When the system calls a rule to be run, if that rule was never executed, the Rules Assembly process must: Assemble the rule, including all the other rules required. For example, if this is an activity rule, it may call other rules. This requires calculating which rules apply to the current user's request and context. Code generation logic creates the Java code for the rule The Java code is compiled and the resulting class is saved to the database The class must be loaded by the JVM for execution Of these steps, the code compilation is the most expensive. We use cache to remember which class applies to what request in order to avoid repeating any of this work. The techniques used to cache assemblies and avoid extra work have changed over the past few PRPC releases. In all PRPC releases prior to v6.3 this was referred to as FUA (First Use Assembly) cache. The FUA cache stores the key which is a combination of the requested rule name, the Rule-Obj-Class to apply it against, and the user's ruleset list. The assembly process also uses "inlining" which means the logic from many rules is included in one assembled class. The cache metadata is stored in three tables in the database so that this can be shared across other nodes and is retained even when the server is restarted. Caches are grouped based on the ruleset list and if multiple users share the same ruleset list they are able to share the cache content. Rule Assembly cache details can be viewed in SMA by expanding the Advanced section and then clicking Rule Assembly Cache Management. 
Application Based Assembly Cache (ABA) In PRPC v 6.3, the cache was grouped in applications instead of ruleset lists. This reduced the number of assemblies considerably and the size shrank hugely by avoiding redundancy. ABA stores the Rule Assembly cache in three database tables pr_sys_appcache_shortcut: This table stores the assembly cache, mapping cache key (rule name, requested applies-to class, and top level application) to an assembled class name pr_sys_appcache_dep: This table stores the inlined rule names for each assembled class pr_sys_appcache_entry: This table stores the assembly cache, mapping cache key (rule name, requested applies-to class, and 'owning application') to an assembled class name In addition to this, to support the sharing, the system also maintains the application hierarchy and rulesets in database tables namely pr_sys_app_hierarchy_flat and pr_sys_app_ruleset_index respectively. The Application Based Assembly Cache Management page in the Advanced section in the SMA helps in viewing the ABA. ABA is used in 7.1 applications as well for all UI rules such as Harness, flow actions, sections, and paragraphs. There are various reports that provide additional information on caching entries that are stored in both memory and database. It is essential to look at the count field in both ABA shortcuts and Assembled Class entries to see how they compare to the target, limit and max size. It might require resetting these entries depending on how many entries are being used. The ABA cache detail can be viewed for a specific rule by entering its name in the Rule Name field and clicking the ABA Cache detail button. This lists all the entries in the table for that specific rule. VTable In 7.1 ABA has been replaced by the Virtual Rules Table (VTable), which does not include inline rules and eliminates the context. VTable cache is stored in the database table named pr_sys_rule_impl and it contains only the mapping of the rule to the assembled classes. 
The VTable cache stores the key which usually is the combination of (Rule Class and the purpose which is the name of the rule). When the rule gets saved, it creates an entry in the pr_sys_rule_impl table, saves the assembled class in the database table named pr_assembledclasses. When the rule gets invoked, it looks for the VTable cache and gets the mapped class name by searching the purpose. VTable caching offers several benefits such as the drastic reductions in the cache size and database table size, zero contention since updates happen only on the rule save, and significantly improved performance because the product ships with all Pega shipped Rules preassembled in the database table. VTable cache does not require any configurations since a very small footprint is stored in the table. In rare cases, when the system pulse throws some exceptions or if rules are not consistent along different nodes, we have the option to use SMA to reload rules to make the same rule appear in all nodes. To do this, we navigate to the Virtual Rule Table Cache Management page in SMA and then enter the name of the rule which we want to reload. The page refreshes to show the options to reload and reassemble that specific rule. Reassemble triggers assembling the rule again; this might be required in rare cases when the log files show exceptions such as UnresolvedAssemblerError. The Static Assembler is useful in pre-assembling rules that are developed by your development team. Running the static assembler builds the VTable and Appcentric assembly cache. When the rules are accessed for the first time, the rule assembly kicks in which takes a big performance impact. To avoid this, it makes sense to run the static assembler when new application rulesets are imported. The static assembler can be run from the Designer Studio by navigating to System > Tools > Static Assembler. 
There are two caches that use File System for its storage; they are the Static Content Cache and the Lookup List Cache. Let's look at them one at a time. Static Content cache holds text and image files corresponding to the extracted text file and binary file rules. It is actually cached in two places, on the server and on the client. Static content refers to files that rarely change. Some examples include: 1. Binary file rules - image files of png or jpg extension 2. Text file rules - javascript and CSS used by PRPC rules 3. Form file rules - older rule forms, most of them are deprecated in 7.1 4. EForm file rules- rules that are used to generate PDFs When they are requested, these files are extracted and stored in a directory named StaticContent in the application server. They are stored in the server's file system so even restarting the server does not remove these files unless they are manually deleted by the user. Some of these files are stored in the client machine, typically on the browser cache. When users request data, it retrieves the data from the browser cache, which eliminates the need to request information from the server. By default, PRPC sets an expiration tag of 24 hours, so the file does not get updated from the server for 24 hours. Users can still use the browser options menu to clear this cache. Static Content cache stored on the server file system is structured as sub-directories of the StaticContent common directory in the Pega temp directory. If you need to clear the static content cache at the next startup, delete the PegaRULES_Extract_Marker.txt file from the Pega temp directory before restarting the system. PRPC also creates the sub-directory names with a hash code derived from an entire ruleset list. Static content stored in the server can also be managed using SMA in the ETier Static Content management page in the Advanced Category. 
The reports and the summary provide statistics such as the number of files written, the number of files read from the database, the elapsed time to write files, read files from the database, time to invalidate rules, etc. This page also offers the ability to clear caches individually, such as lookup list, rule-file-cache, rule file and service export cache. The WebTier Static Content management page displays the cache information for the files on the web server such as images, javascript files, css files. Static Content caching performance can be improved by exporting the static content caching to edge servers. If the users are located all around the globe, users connecting to PRPC might see some performance issues when accessing the files since the cache is stored in a server and it depends on the network speed. Edge servers are web servers that are installed closer to the user's location so static content caches can be quickly accessed. Once Edge servers are installed, users who can access those servers should be assigned to a specific access group. Then we need to customize the stub activity shipped in the product (ApplicationProfileSetup) to set the edge server URL and then save it in a production ruleset. The production ruleset should then be added to the access group. Use the Extract Edge Server Files landing page to create the zip file and then import it to the corresponding edge server for the users to access them. Refer to the linked PDN article for more detailed information on these steps: How to improve response by distributing static content to remote edge servers (PDN Node ID: 11728) Lookup List Cache Lookup list cache stores data instances that are requested by rules that do a lookup on the PRPC database tables. This contains the XML format of the lookup list information displayed in drop-down boxes such as Smart Prompts or other list based controls. 
They are stored under the LLC /Class Name directory under the Pega Temp Directory. These results are then saved as an XML file or as a gzip file in the temporary directory on the application server disk. The gzip file is used when the lookup list cache is served to the web browser and the XML file is used to populate the clipboard. The system automatically deletes these files if the list becomes stale due to some operations. This file should be cleared rarely, but should definitely be cleared on the occasions when you import a file that updates these records or when there is an exception in the system pulse. Certain rules such as classes, field values, properties, property-alias, etc. are saved in the Conclusion cache. These rules are not saved in the Rule cache and the rules also do not require assembly or compilation. This cache is saved in the database table named pr4_rule_sysgen. It groups similar rules, hence searching is quicker even when accessing the database. The most common example is Field Value (one conclusion for all locale values). The conclusion cache contains only the bare minimum information needed to run the rule. This cache also uses memory caching; the metadata information about these rules is stored in memory. The conclusion cache can be viewed in SMA in the Advanced category. The Conclusion cache page lists the details of each cache across rule categories (property, class, field value, etc.), it lists the size estimate, instance count, prune age, etc. It also lists options to clear cache for both memory and database. Clearing this cache has an adverse effect and hence should not be done. Starting in PRPC v6.2, the conclusion cache parameters were externalized and can now be overridden using dynamic system settings. The following table shows the default values. 
To override these values, we should use the following as the setting purpose defined as: prconfig/conclusioncache/typeName/minimumsize (typeName = Property or any others in the list) prconfig/conclusioncache/typeName/pruneatage (value entered in seconds) In the list of factory reports, scroll down to locate the Property Reference Pool Report. Make sure that the Pruned field is 0. If you see "2nd class objects now ignored" then the system performance is being affected. What does this mean? It means that the property reference pool has stopped tracking the embedded properties such as pages, page lists and page groups because the pool limit has been reached. This severely impacts performance while iterating those properties. Declarative Page cache This cache supports the data pages that are being used in the application. A data page stores data which is used by applications during case processing. Data pages are invoked declaratively - when a rule accesses a data page, the system looks for that data page in memory; if the page does not exist in memory, the system looks for the data page definition (rule) and then loads the data page in memory. The data page exists in memory for the next access, so when we access it for the second time the system picks the data page that is already loaded in memory. To avoid stale data, the data page can be configured to refresh on a specific condition, time interval or whenever it's being accessed and so on. Data pages are also scoped to decide who can access this page - REQUESTOR (data page is accessible throughout that user session), THREAD (data page is accessible only in that specific THREAD, for example, the page is available for a case and if the same requestor requires information for another case then the cache has to be populated again) NODE (data page is accessible by all requestors logged on that server). 
NODE scoped caches remain in memory until the node gets restarted, while REQUESTOR scoped caches remain in memory until the user logs out. The Declarative Page Cache management page in the SMA (Advanced Category) lists the options to clear data pages that are defined as Node scope. This is useful for administrators and developers to clear node specific data pages when the application renders stale data. This is one of the few caches that can be cleared ad-hoc though it is not mandatory. Declarative Network cache This cache supports how declarative processing functions. PRPC declarative rules are executed by the engine using declarative networks which are triggered either by forward chaining (calculating the target based on the changes on any of the source properties) or backward chaining (calculating the target when it's requested and the value is null). The declarative rules management page in the SMA (Advanced Category) provides details on this cache. This cache still uses the ruleset list to generate the cache and it lists various declarative networks that are associated with each ruleset list on a specific operator. When we select a specific ruleset list and click Get Cache detail we see a list of all classes and then we can click through to get the list of all rules defined in the class. All the declarative network instances are also stored in the pr_sys_decchg database table and are mapped to the System-Declare-Changes class. We can use the reports that are shipped as part of the product to run and see the details of these instances. These act as helper tables in constructing the cache that is available in memory and also store the relationships between various rules; however caching happens primarily in memory. When PRPC is installed on a multi-node system, a copy of the various caches is stored on each node, and each of those nodes must be updated with rule changes. This update process is managed by the System Pulse functionality. 
Saving a rule change (update, add, or delete) to one of the Rule tables in the database fires a PRPC database trigger, which then registers the change in the pr_sys_updatescache table (in the same database). In addition to rule updates, there are other events which add entries to this table. The types of event which are saved to this table include: Cache - for any changes to the rule cache (updates or deletes) Index - when the Lucene index is changed DELLC - when the lookup list cache is deleted RFDEL - when any static content or rule-file- instance is deleted IMPRT - when an import occurs and it clears the lookup list and static content cache RUF-X - when a function library is regenerated Saving a rule change also automatically invalidates the appropriate information in the caches on that node; however, all the other nodes in the system now have out-of-date information about that rule. Every 60 seconds, the pulse (which is part of the standard PegaRULES agent) on each node wakes up (independently) and queries the pr_sys_updatescache table. The query retrieves records which are "not from this node" (which this node did not create), and which have a timestamp which falls within the window of time starting with the last pulse (or system startup) and now. In this way, all of the changes originating on other nodes are selected, and the appropriate cache entries on this node are invalidated by marking them as dirty. The next time one of the rules which has been invalidated is called, it is not found in the cache, and the updated version of the rule is read from the database and is eligible again for caching. System Pulse activity can be seen in System Management, under Administration > Pulse Status Page. Managing HA apps The concept of clustering involves taking two or more PRPC servers and organizing them to work together to provide higher availability, reliability and scalability than can be obtained by using a single PRPC server. 
PRPC supports both horizontal and vertical clusters (scaling). Horizontal scaling means that multiple application servers are deployed on separate physical or virtual machines. Vertical scaling means that multiple PRPC servers are deployed on the same physical or virtual machines by running them on different port numbers. PRPC natively supports a combination setup which uses both horizontal and vertical clusters. A cluster may have heterogeneous servers in terms of hardware and operating system. For instance, some servers can use Linux; some can use Windows, and so on. Usually, the only restriction is that all servers in a cluster must run the same PRPC version. What does clustering involve? Redundancy across all tiers of an N-tier architecture - there can be multiple load balancers handling traffic. For true high availability, there can be multiple load balancers handling traffic, multiple JVMs using Physical (horizontal cluster) and/or Virtual (vertical cluster) machines. Similarly, we can also have redundancy in Shared Storage repositories and database servers. A Shared Storage repository is the key component in achieving high availability because it's used for crash recovery and Quiesce (which we will learn more about later in this lesson). A Shared Storage interface allows PRPC to manage stateful application data between other PRPC servers. Out of the box, PRPC supports a Shared Storage system which can either be a shared disk drive or use NFS. Both of these cases require read write access on those systems for PRPC to write data. If organizations decide on a different Shared Storage system then they need to make sure the Shared Storage integrates with PRPC. Pega qualifies a server as High Availability if it is available for 99.99% of the time. This means that the system should be down for a maximum of 53 minutes over a year. What does the 53 minutes include? 
This includes any unplanned outages due to a system crash and all planned outages for upgrading the system. PRPC must be deployed on application servers such as websphere, weblogic or JBoss. These servers offer features such as shared messages and buses to handle services and listeners during planned or unplanned outages. High Availability Roles PRPC comes with two roles (PegaRULES:HighAvailabilityQuiesceInvestigator and PegaRULES:HighAvailabilityAdminstrator) that can be added to access groups for administrators who will be managing high available applications. HighAvailabilityQuiesceInvestigator is given to administrative users who perform diagnostics or debug issues on a quiesced system. When a system is quiesced, the system reroutes all users other than the ones having this role. By default, a privilege named pxQuiesceAdminControl is created for granting this access. The HighAvailability Administrator role in addition to the pxQuiesceAdminControl privilege, offers the ability to access the high availability landing pages in Designer Studio. In general, High Availability Cluster management is performed using AES or SMA rather than using the landing page since Designer Studio updates rely on System Pulse for the update to be applied on other servers. This mechanism is slower (requires at least two minutes for the system pulse to update other servers) than using AES or SMA. Similarly, the High Availability Cluster settings are set using Dynamic System Settings that applies to all servers. Let's look at the different options to update these configuration settings. Setting Configuration Values There are three ways you can set the configuration values . 1. Using prconfig.xml - This approach requires making configuration changes to each PRPC server. This approach can be used on cases where we want to make changes only on specific PRPC servers. 2. 
Using DASS (Data-Admin-System-Settings, also known as dynamic system settings) — You create a new DASS instance which is stored in the database and is accessible to all servers in the cluster. 3. Using a shared prconfig.xml file - We use a shared drive or NFS to store the prconfig.xml and all PRPC servers can be configured to access this using JNDI settings. Pega Applications have four sets of requestor types logging in to an application. They are: 1. Browser Requestors are all users, including developers who are accessing the application through the browser. The requestor IDs all start with the letter H. 2. Batch Requestors are background processes that are performed by PRPC agents (daemon processes) and child requestors. The requestor IDs start with the letter B. 3. Application Requestors are created when PRPC is accessed as a service from an external system or when the PRPC listener triggers a service request, the requestor IDs start with the letter A. 4. Portal Requestors are created when PRPC is accessed as a portlet using Service Portlet rules. The requestor IDs start with the letter P. The requestor management page in the SMA (System Management Application) identifies all the requestors that are currently logged into a PRPC server at a specific time. Notice that it gives the number of browsers, batch and application requestors and also their IDs and the client address (machine name) from where the requestor is logged in. This page allows administrators to monitor these requestors individually by selecting the radio button and then by clicking the button above to perform a specific task such as looking at the performance details of the session or stopping the session. The requestor type is a data instance record and it exists under the SysAdmin category. We can see the requestor type definitions using Records Explorer. Alternately we can also use landing pages (System > General). 
There are two sets of requestor types and the current version of PRPC uses the one with pega (all lowercase) as the system name. The requestor type using prpc as the system name was used in prior releases and exists for backward compatibility. This page also lists all the PRPC servers connected in the cluster. Notice that it displays both horizontal and vertical clusters. If we click the BROWSER link in the TYPE column, it opens the associated requestor type data instance. We see that it has an entry for the access groups and uses a standard access group named PRPC:Unauthenticated. When we open the access group record, we see that it provides access to the standard PRPC application (configured in the APPLICATION). The users belonging to this access group get guest access (configured in the ROLES). When the user accesses PRPC using its URL, they are presented with a login screen. The default login screen is defined as part of the PRPC application, so all unauthenticated users should have access to this screen to view it. Even in cases when they use a third party sign on such as LDAP or single sign on (SSO), PRPC requires guest access for BROWSER until they successfully login to the application. Once they successfully login to the application, they switch to their own access group. Requestors belonging to type APP or PORTAL also get the same access group (PRPC:Unauthenticated); if the application requires a separate access group we may need to modify it to use a different access group. BATCH Requestors use a separate access group named PRPC:Agents which was used for legacy (older version) agents. This was configured to provide a separate role named PegaRULES:BATCH for PRPC agents. If the agents we create use this access group, we may need to modify it to make sure it gets access to the application. A clipboard page is a data structure that holds name-value pairs. 
A clipboard page is created based on what the user performs at runtime; when they create a case, it typically creates a user page for storing the case details and also has a few other top level pages for storing static data, user information, application information, etc. In a typical user session, there are a lot of pages created and these pages exist in memory unless the application clears them out after their usage. A thread or PRThread is an object created by PRPC in memory. A PRThread consists of a set of clipboard pages and it provides the context in which the user is making changes. Thus, a PRThread is not related to a JVM thread but is a namespace used as a container for clipboard pages. For instance when the user is creating a new case, PRPC creates a PRThread and it creates various clipboard pages as part of that thread while the user is working on that case. In a requestor session, the system generates multiple threads so users can work on multiple cases in parallel. When a developer works on Designer Studio and opens 10 rules, it creates 10 separate PRThreads and multiple clipboard pages in each of them. Having all these in memory significantly enhances productivity and performance since the data is in memory and users can switch between tabs to work on multiple cases or rules. The only downside is that it ends up using a large memory footprint. Passivation and Requestor Timeouts BROWSER requestor sessions get closed in one of three ways: 1. When users log off and close the browser 2. When the PRPC server is restarted. 3. When users get timed out. If users do not log off, their sessions remain in memory along with all open PRThreads and clipboard pages. PRPC uses timeouts to clear these idle requestor sessions, PRThreads and clipboard pages from memory so that the resources are utilized mainly by active users. The timeout used to clear requestor information from memory is known as the Requestor timeout. 
By default, a requestor session times out after 60 minutes of inactivity, while a PRThread and clipboard page times out after 30 and 15 minutes respectively. These values can be overwritten using dynamic system settings. When the requestor timeout is reached, the corresponding information is saved in the disk by default. The requestor information is retained on the disk for a specified amount of time after which it is removed. This process is known as Passivation. PRPC by default passivates data on timeouts. If users access the same session, then the system restores their session back to memory. This reverse process of restoring the passivated data back into memory is known as Activation. Passivation is performed by a background daemon which runs periodically and looks up the idle requestors and then moves data to the disk. When the data is again retrieved within 24- 48 hours this data is cleared from the disk as well. Again we can customize the dynamic system settings to save it in a database instead of disk (change prconfig/initialization/persistrequestor/storage to database instead of filesystem). However, passivated data cannot be saved in the database for applications that require high availability. When using disk, it uses the temporary file directory by default and we can use dynamic system settings to set the directory location where we want these files to be saved (changing prconfig/storage/class/passivation/rootpath). In case of High availability applications, session passivation is stored in a shared disk so that the session can be activated in any of the nodes. High availability applications can also use custom passivation mechanism by using custom classes to implement passivation. Look at the article titled Creating a Custom Passivation method on the PDN that is in the related content for a sample implementation using Memcached. Passivation helps in managing heap sizes as idle objects are cleared from memory frequently. 
Lowering the default timeout values a little bit helps in clearing idle objects from memory quicker. However, we need to make sure that it does not passivate really soon, for example if we set a requestor session to time out in 5 minutes, then the system might end up activating way more than its required since their sessions are passivated way too soon. You can check the Passivation settings using the SMA. PRPC uses another timeout named Authentication timeout. This timeout is configured in the access group record. PRPC forces users to login after this timeout is reached. In cases where external authentication is used this timeout is disabled. If authentication timeout is expired then the user should login again to activate their session. Load balancing is a way to distribute the workload across multiple clusters in a multi-node clustered environment. Since the requestor information (session, PRThread and clipboard page) are stored in memory, PRPC requires all requests from the same browser session to go to the same JVM. In network parlance, this is also known as sticky sessions or session affinity. For high availability applications, the load balancer must also support: Automatic monitoring for any failure detection, the terms are usually defined during requirements Ability to disable a JVM so that it does not allow any new user sessions but allows existing user sessions to continue. Disabling is done to facilitate shutdown. Scripting capabilities to support cookie management, allocation of work to jvms, and so on. Load balancing can be achieved by using hardware routers that support "sticky" HTTP Sessions. Cisco systems Inc. and F5 Networks Inc. are examples of vendors who offer such hardware. In addition there is also software, virtual and cloud-based load balancer solutions (Amazon EC2's elastic scaling) that are available. Session affinity or sticky sessions can be established in many ways, and PRPC uses cookies to manage sticky sessions. 
Using cookies is the preferred option and is commonly used in the industry. The cookies can be set using the session/ha/quiesce/customSessionInvalidationMethod in prconfig.xml or in dynamic system settings. Failover support allows requestor sessions to continue on a multi-node clustered environment when an outage occurs. The outage can happen in two ways - the server where the user is logged in crashes or the browser through which the user is accessing the application crashes. Failover strategy is determined based upon the cost and time effort. There are broadly two types of strategies - cold failover and hot failover. All high availability applications require hot failover. Let's briefly look at them. In a cold failover, load balancing systems send a heartbeat to the nodes to check if they are still running. This heartbeat can be configured in the application server and can be sent each minute, or once every two minutes or so. When the server is down, it sends a notification to the load balancer and moves the sessions to another server. The session information that is not stored or committed to the database is lost. In the case of high availability applications, we can enable failover by changing the passivation setting to use shared storage (changing storage/class/passivation/rootpath). This setting is usually set using the dynamic system settings. When configured, the requestors accessing a JVM are redirected to another JVM through the load balancer when the server crashes. The user must authenticate again to establish their session in the new node. All information that is not committed is lost in the process. We can achieve that by configuring another setting (session/ha/crash/RecordWorkInProgress=true). This setting stores the user interface metadata in the shared file system. During a crash, this setting helps in redrawing the user screen to bring back all values and restore the user interface to the previous state. 
These are the detailed steps that occur to help recover a crashed session. 1. PRPC saves the structure of UI and relevant work metadata on shared storage devices for specific events. When a specific value is selected on a UI element, the form data is stored in the shared storage as a requestor clipboard page. 2. The Load balancer continuously monitors all connected PRPC servers. When one of the servers fails, it removes that PRPC server from the load balancer pool so that future requests are not directed to that server. 3. When the user sends the next request, it goes to the new server. It creates a new requestor, and then their session is activated using the passivated data stored in the shared storage layer. Then it uses the UI structure and work metadata to paint the screen for the user. Browser Crash PRPC also handles browser crashes seamlessly, when configured appropriately. In the case of High Availability applications, when the browser crashes, a new browser session connects to the correct server based on session affinity. The user interface metadata and clipboard are used to repaint the screen. PRPC applications must be HTML5 compliant for browser crash and node crash to work. For user metadata recovery, PRPC applications must use dynamic containers (a feature used in Pega Portals for displaying the work area of users). The Dynamic Container displays the work area using tabs and the application tab recovery feature to recover the data. The following table explains the events that occur during a browser or a PRPC server crash. One of the main reasons to perform planned outages is to upgrade PRPC to a new version. In High Availability applications, we could upgrade the PRPC server without impacting the user sessions. In a multi-node clustered environment, these are the steps to follow to upgrade the system. 1. In the Load balancer console, disable the PRPC Server that is slated to be upgraded. 
This ensures that the load balancer does not direct new user traffic to this server; however it continues sending existing user traffic to that server. 2. To start moving existing users, PRPC recommends using a process named Quiesce. We can use AES, SMA or a landing page to Quiesce the PRPC server that has been disabled in the load balancer. Quiesce Process When the server is quiesced, it looks for the accelerated passivation setting. PRPC sets this to 5 seconds by default, so after 5 seconds it passivates all existing user sessions. When users send another request, it activates their session in another PRPC server without them losing any information. The 5 second passivation timeout might be too aggressive in some applications, so it is expected to increase the timeout to reduce the load. In general, this timeout must be consistent with the application, the typical time taken by a user to submit a request. Once all existing users are moved from the server, we can upgrade this server and then once the process is complete, we enable it in the load balancer and cancel Quiesce from AES/SMA. We can use the Requestor management page in the SMA to check the requestors. To perform a rolling start of all servers, we need to follow the same steps to disable each server in the load balancer and Quiesce them. After all users are migrated to another server, the server can be started. Pega 7 supports split schema database architecture which is useful to perform PRPC upgrades that minimally impact the user experience. The split schema separates rules and data by saving them into separate schemas. Splitting schemas enables minimal to zero down time during PRPC, application, and framework upgrades or patch installation. PRPC System Administrators can install and upgrade a new rules schema in the production database while the old schema is still in use. 
Pega 7 high availability features can then be used to move users from the old rule schema to the new schema on the production system to complete the upgrade. The steps to do this upgrade are: 1. Freeze rule development in the existing system 2. Create a new schema 3. Migrate rules from the old schema to the new schema 4. Upgrade the new schema to a new PRPC release 5. Update the data instances in the existing data schema 6. Modify the DB connections to point to the new schema 7. Quiesce one server after another to perform a rolling start Pega Mobile Pega Mobile Client - The Pega Mobile Client is available in a generic form in the app store as well as in a more customizable form using the Build from PRPC functionality. Pega Mobile Offline option - The ability to use some features of a Pega application in offline mode (when not connected to the internet). Pega Mobile Mashup SDK - Supports both the iOS and Android Development kits so that the Pega application can be embedded in any custom iOS or Android Application. Pega AMP - Pega AMP (Application Mobility Platform) is a platform for building, integrating, and managing native and hybrid mobile applications with PRPC. Pega AMP consists of a communications framework, a set of APIs, and services for mobile-specific tasks such as authentication, integration, GPS position reporting, and push notifications. Pega AMP Manager - Pega AMP Manager provides the ability to manage users, devices, apps, and backend mobile services in an enterprise. Pega AMP Manager is the main component in implementing both Mobile Application Management (MAM) and Mobile Device Management (MDM) services. Pega applications can be accessed as a mobile application without any additional development effort. All Pega 7 applications use dynamic layouts and the skin rule can be configured to be responsive and adjust the layout based on the screen resolution. 
All prior PRPC versions require that you use a special ruleset to render the application correctly on mobile devices. The mobile ruleset is also useful in Pega 7 applications because you can enable device specific functionality such as location services, camera, etc. So what are the different ways a user can access Pega applications? 1. Open a browser such as Safari or Chrome from your device (iPhone or iPad or any other android device). 2. Download the Pega 7 app from Apple iTunes or Google Play store. 3. Build and then distribute the custom app for the Pega application. 4. Embed the Pega app inside another mobile app using the Mashup Model. Let's look at the first two options here and then we will learn about building a custom native app and Mashup model. Open Pega as a Mobile App in Browser The simplest way is to open you browser on your device and enter the URL of the application to access them. Once the page opens, we can create a shortcut using the Add to Home Screen option. This option is available on both iOS and Android devices. The Add to Home Screen option provides the ability to define a name for the application and that name is used as a label for the icon that is being used as a shortcut. In newer releases, PRPC offers us the ability to quickly access the application, in the Designer Studio, when we click the About PRPC link in the lower part of the window. When clicked, it opens the About PRPC page, which shows the bar code that can be scanned to open the URL without entering it manually. Pega applications built on Pega 7 can be accessed directly on a browser automatically, however for applications built on prior versions it requires an additional step. The Pega 7 application has been modified especially on the user portals (case manager) to handle different screen resolutions automatically. If Pega 7 applications use dynamic layouts, the responsiveness feature goes a long way in making sure the applications renders appropriately. 
Dynamic layouts are new to Pega 7 and hence all applications built in prior versions require a special ruleset (the PegaMobile ruleset). The Pega Mobile ruleset is shipped as a jar file and, if the application requires it, administrators can use the import wizard to import it. Using the Pega Mobile ruleset helps in the following situations: Having mobile device support for PRPC 6.2 and 6.3. Getting access to specific mobile functionality that is not yet available in Pega 7 out of the box. Open in the Pega 7 App The Pega 7 mobile app shipped by the product team is a quicker way to access the Pega app than using a native App. The current version of the Pega 7 app can be downloaded from the Apple App Store or from the Google Play store. All Pega applications built on Pega 7 are enabled to be accessed using the Pega 7 mobile app. The newer release also shows the QR code directly, so we can use the mobile device to scan the code to launch the app. These are some advantages in using an app rather than accessing it as a web application. The app can either be the Pega 7 app or a custom mobile app (which we will discuss shortly). Access to device capabilities - we could use device features such as geo-location services, push notifications, and so on. Offline Access - The application can be accessed offline even when the device is not connected to the Internet. There is limited functionality in offline mode but it does allow many common actions such as creating a new work item, editing an existing work item, and submitting work. Customers prefer using a custom mobile application instead of using the Pega 7 app. To do this, the application rule needs to be modified to select Custom mobile app (which requires a license). The application rule can then be configured to set the application name, URL and so on. There are two important things to note - the first one is to enable the Push notifications which send notifications to the mobile device. 
The Push notification content is configured in the flow using a smart shape. The second one is to customize the icon and splash screens to use different images than the Pega 7 images. The Help icon next to the label opens a PDN page which provides the Assets zip file. The development team can modify the images and upload it. After updating the assets file, we can use the iOS Settings and Android Settings sections to make necessary changes that are required to create a build and publish it as an app in the Apple Store and Google play store. The build process for the hybrid mobile app uses a hosted build server, which is a licensed product. There are two system-wide settings that we need to put in place based on our separate build server account: the build URL and the authentication profile. 1. Dynamic System Settings The build server is entered in the pega/mobilebuild/baseurl defined as part of the Pega-AppDefinition ruleset. 2. Authentication Profile: Pega ships a default profile named MobileBuildServerAuthentication, which should be modified to set the authentication scheme, user name, build server host name and so on. Distributing Hybrid Apps The Hybrid apps can be built using the shipped build Server and then hosted in the iOS or Android Store respectively. However, enterprises have reservations in hosting custom apps built in the public domain. To help alleviate these concerns, Pega offers the ability to use AMP (Application Mobility Platform) Manager to host and distribute the apps. When using AMP, Pega offers the ability to push apps directly to devices instead of people pulling them from the app store. Administrators or lead developers managing AMP get a user portal that provides various pages that help them to manage mobile apps, manage devices, manage users and so on. The dashboard gives a holistic view of how many users, devices and apps are set up. An Administrator creates user groups and each group has apps assigned to them. 
Users can then be added into the group. Users then receive a notification on their mobile device to install the app automatically. The administrator can remove users from the group and the app is removed from the user's mobile device automatically. AMP Manager is separately licensed software and these fields in the application rule are used for communicating between Designer Studio and the AMP Manager when pushing or pulling user rights and distribution of your mobile application across various mobile devices. In addition to this option, the customized app can also be distributed by using a smart banner. We can create a banner that appears in mobile web browsers prompting users to download the application from the app store. Again the icon can be customized to show a different image. The last mode of accessing a Pega Application is by embedding the application inside another existing mobile app built using other technologies. Pega applications can be launched from within a native iOS or Android development kit. This mashup approach also supports access to device capabilities. This is relevant when an existing mobile app has many different functions, but only some part of that is a Pega process. For example, the custom mobile app is a complete customer-self-service app where only one part of the process (update billing information or requesting a refund) is controlled through a Pega process on the backend. That one piece of the process can be seamlessly integrated into the existing mobile app without new development efforts. So the Pega 7 screen that we see here in this rule form can be embedded inside a custom mobile application. This form is presented to users when they press the Report Accident button on their phone. Pega Mobile ships a mashup SDK which supports both Java and Objective C (for Android and iOS respectively). Native App developers can import the jar file into their environment and then use it as a bridge between the native app and web view. 
Pega applications are still launched as a web view inside the native app. Pega Cloud The Pega Cloud is built as a highly available and redundant architecture scaled to fit the customer's requirements. Standard deployment consists of VPN and load balancers, multiple application servers scaled horizontally, a Primary Database and a Secondary Database for DR (Disaster Recovery) purposes. Each Pega customer gets a dedicated Pega Private Cloud and Pega Cloud uses AWS (Amazon Web Services) as the IaaS (Infrastructure as a Service) provider. The Pega Cloud offering builds additional security layers on top of AWS to secure data in an encrypted form using Pega Cloud Cryptology. Data stored on disk (log files, data files, caches, and so on) is stored in an encrypted format. The key to decrypt is stored in memory and not in the disk itself. The Pega Database can also be optionally encrypted using JCE (Java Cryptography Extension). The Pega Cloud firewall restricts users from breaking into the Pega Cloud by using a three-tier firewall and the Pega Cloud encrypted overlay creates distance from IaaS and enables encryption in server traffic. Pega Cloud provides automatic nightly backups and a quicker decommissioning process if the client decides to discontinue using the cloud. Shared layers: The shared layer consists of the base PRPC that comes as part of the installation. All tenants share the same PRPC version and when a new version is released, the upgrade happens at the shared layer and all tenants upgrade to a new version when the shared layer gets upgraded. All PRPC frameworks are also stored in the shared layer. All tenants see the frameworks and hence the tenants are determined based on the frameworks that they use. Tenant Layers: All application development done by the development team gets stored in the tenant layer. The shared layer does not usually store any rules even if they are common to different tenants. 
The primary reason is that any information in the shared layer is accessible by all tenants; information shared by multiple tenants must be replicated across all tenant layers. This is not a limitation per se but is helpful in ensuring that the data in the tenant layer is applicable to the tenant while the data in the shared layer is applicable to all tenants. However, if it requires us to publish a shared application, we can create and store a multi-tenant implementation application in the shared layer. This application is shared by all tenants who can customize content if required, however the data they create in tenants is not shareable to other tenants. Multi-tenant administrators provide a non-multitenant system for developers to build this implementation layer, which is then moved to the shared layer by the multi-tenant administrators. When accessing a Pega application hosted on a cloud system, the debugging tools (Tracer and Clipboard), the System Management Application (SMA) and other performance tools behave the same way as that in the regular system. You can access the log files from Designer Studio and they are stored in the file system. Remote tracing is allowed, so developers can login and trace the session of another user logged in to the system. Similarly, the System Management Application displays all requestors logged into the cloud instance and we can run the Tracer and Clipboard or view performance statistics for a specific user. Multitenant systems work a little differently in a Cloud deployment. Consider a sample URL for two separate tenants. Both the URLs are identical except the string that appears right after PRServlet. That string identifies the Tenant ID and when the user logs in, the Tenant ID is used to load the data specific to that tenant from the Pega database tables. When hosting an application on a multitenant system, we get administrative access for the shared layer to access the application. 
The System Management Application, remote tracing and a few other features are disabled when logging in as a tenant. When we open the SMA for the MTTesting server, we see all the tenants logged in to the server. The Requestor page displays the tenant name to the left of the Requestor ID to identify the tenant. Dev Deployment There are four main roles involved in the deployment process. Development Manager or Build Master Leads the application and ruleset version planning efforts. This is of particular importance for multi-stream development. Responsible for creating and versioning the deployment infrastructure: Application rules, rulesets, product rules and access groups. Creates branches for development teams. Merges branches back into the trunk. Oversees creation of the deployment package. Verifies version numbers of exported rulesets. Coordinates the transfer of rules with the System Administrator. System Administrator Imports the deployment package into target systems. Performs additional configuration if necessary according to the Lead System Architect requirements. Runs verification tests to ensure the package is deployed correctly. Database Administrator Works with the System Administrator to identify tables/schema changes which must be deployed into the next system along with the application assets. Lead System Architect Lead developer on the project. Knows the intricacies of the application and assists in defining rules, data, and work instances for the product rule to create the deployment package. May ensure all rules have been checked in by other developers. May oversee the creation of certain test cases prior to deployment to QA. Roles may vary based on each organization and some responsibilities may be carried out by the same individual. As a best practice, we recommend that rulesets and data instances be included in the same product definition. 
In some specific cases, for example if you have a lot of data table content or if the data follows a different lifecycle than the rules, a separate Product rule might be appropriate to hold the data instances. If you decide to separate the data, make sure to balance the number of Product rules versus any complication of delivering the data. Changes to data instances must be reported from the development team to the Development Manager before the release. The recommended communication vehicle is the Product rule itself, which should be created at the same time as the application ruleset versions. Any new or updated instances should be listed directly in the Product rule by the developer who added or updated it. The Development Manager might need to extend this process for special cases, such as when a data instance needs to be deleted. Create a Deployment Package It is the responsibility of the Development Manager or Build Master to supervise the creation of the deployment package. Before the planned release the structure for the next release needs to be setup and the creation of the deployment package initiated. 
The following tasks need to be performed by the development team: Create a release document in collaboration with the release engineer Create versions of the application rulesets for the next release Create a new application rule for the next release, if required Create a product rule for the next release Point the developer's access group to the next release Make sure that all rules in the release are checked-in Lock versions of the release's application rulesets Make sure the smoke tests are executed Validate that the Product rule contains all changes Export the Product to the deployment package If applicable, export SQL DDL scripts approved in the release version to the deployment package If applicable, export PRPC hotfixes approved for the release version to the deployment package If applicable, export environment change information and resources in the release version to the deployment package Finalize the deployment package and upload it to the release repository In all exception cases, the release plan needs to be amended accordingly. We recommend that the delivery package be created as a single zip file containing all information and resources needed to execute the deployment of the release to a higher environment. The archive file should be versioned and stored in a repository. Subsequent drops to the environment are typically incremental. In other words, only the fixes are promoted with subsequent deployments. We recommend that the deployment package archive file contains the release documents and one or more subfolders with the deployment artifacts. The release document contains the release information, such as content, and installation and smoke test instructions. If security policies allow, database schema changes can be applied automatically by the import process using information in the archive file. In other cases, this process needs to be done with the involvement of the Database Administrator (DBA). 
The SQL Script folder contains the SQL files provided by the development team. These files contain all the database modifications required for the release. Follow this naming convention "ABC_XX_XX_XX.sql" where ABC is the file name, for example, install or update, and XX_XX_XX is the current release version. Try to package the SQL commands in as few packages as possible to ease deployment. If multiple files are necessary, mention the order of execution in the release document. One or more Product files are provided by the development team. Follow this naming convention "ABC_XX_XX_XX_YYYYMMDDHHMM.zip" where ABC is the application name, XX_XX_XX the build version, and YYYYMMDDHHMM the timestamp. The timestamp can be useful to track differences in data instances. Place changes to the environment, such as PRPC hotfixes, libraries and java property files, in the Environment Specific folder. Use subfolders to organize the different types of environmental changes. Do not forget to include instructions in the release document. Deploying a Release The System Administrator supervises deployment on the target systems. The following steps, typically outlined in a release plan, are required to prepare for the deployment on the target system: 1. Obtain the deployment package from the repository. 2. Read the release document. 3. Perform a backup of the database. 4. If necessary, perform a backup of the node. 5. When the preparation is complete, the actual deployment can start. 6. Apply the environment changes as described in the release document. 7. Execute contents of the SQL file against the database. 8. Import the product archive file into the target system. 9. Copy the PegaRULES log file containing the import logs and store it for future reference. The import tool displays a list of all rule or data instances where the IDs are the same but update times differ. Normally rules should never appear in this list since the rulesets should be locked on the target system. 
If rules appear on the list it should be investigated since it probably means that someone has unlocked the ruleset and made changes. Verify the data instances that will be replaced before selecting overwrite existing data instances to complete the import. Execute the smoke test as described in the release document when the product file has been successfully imported. If the smoke tests fail and the product requires rebuilding, proceed to the development environment, create a new patch ruleset version, make the change and create a new product file. In certain situations you might need to revert the database to the backup taken prior to importing the new release. In that case it is important to understand if cases have been created or updated from the time the backup was taken and decide on a strategy for how to handle those. Advanced Application Migration There are several tools available to support the application deployment process. In the Senior System Architect (SSA) course we looked at the Application Packaging wizard and the Import and Export utilities. In this lesson we'll continue and look at the Migrating Cases wizard and how to import and export applications using the Command Line tool. We'll also look at how applications can be migrated automatically to their target systems using the Product Migration wizard and how the rulebase on two systems can be compared using the Rulebase Compare tool. At the end of this lesson, you should be able to: Migrate Cases Migrate an Application using the Command Line Tool Use the Product Migration Wizard Compare the Rulebase of two systems Common Use Cases for Case Migration There are several situations in which you might want to migrate cases from one system to another. For example: If you need to investigate a reported issue on a production system you might migrate cases from the production system to a test system. 
If cases are part of the application itself, for example the lessons in Pega Academy are modeled as work objects, in such circumstances the cases need to be migrated as they are promoted through development to production. If applications are used offline. Imagine a complaint application used on cruise ships. When the cruise ships are out at sea complaints are entered in the system and when the ship reaches a harbor the complaint work objects are packaged and uploaded to the master system for further processing by the customer care team. Package work wizard The Package Work wizard enables us to quickly create a product rule, also called a RAP file, which contains cases, assignments, history and attachments. Start the Package Work wizard by selecting DesignerStudio > Application > Distribution > Package Work. The wizard consists of three steps: Enter Description, Enter Criteria and Select Work Classes. First we need to enter the name and version of the product rule to be created. The text entered in the description field appears on the history tab of the product rule. We also need to specify the ruleset and version in which we want to create the product rule. In the next step we select the work pool that contains the work classes to be included in the product rule. In addition to the work item instances, it is also possible to include assignments, attachments, and history instances in the specified work classes. In the last step we need to select the work classes in the work pool we want to include in the product rule. A Work ID Range must be provided. Make sure to enter the correct prefix. If the prefix is incorrect or missing, the work items are not included in the archive file. The final screen shows the product rule generated. It is possible to start the Product Migration wizard using the Start Migration wizard button. Let's have a look at the product rule generated by the Package Work wizard. As expected, there are no applications or rulesets included. 
The class instances section contains the instances selected in the wizard. The first three lines are related to the work item itself. The first line specifies the work class as selected in the wizard. The second line contains the Link-Folder class, which defines the work items that belong to a folder. The third line contains the Index-WorkPartyURI class, which allows reporting and searching for work by party. The next ten lines include assignment instances related to the work objects. The following seven lines include work object attachment instances. The last line for the class instances includes the work object history. The ID counters for the ID-prefixes are stored in a separate database table. The values of the counters for the included work object classes are included to ensure that there will be no duplicate IDs generated on the target system. You can create the archive file using the Create Product File button. In addition to the product rule the Package Work wizard also creates a set of when rules that are specified in the when filter fields. They are used to filter instances to make sure that only items relevant to the included cases of the classes are included. The naming of the when rules follows the same pattern: Include, then the type of class it filters, for example, Work, then _ followed by the name of the product rule, in our case Candidates, and then _ followed by the product rule version. For example: IncludeWork_Candidates_01-01-01. Use the Command Line Tool The PRPC utility command line tool is part of the software distribution zip file. We can use it to import and export rules. The command line tool should only be used when scripting implementations. Otherwise, use the import and export functionality in the Designer Studio. The utility command line tool works directly on the database; the source or target systems do not need to be running when the script executes. 
Extract the content of the software distribution zip file into a directory. You need to have a JDK, Java 5 or higher, installed to run the command line tools. The path to the JDK must be defined in a JAVA_HOME environment variable. The target database vendor's JDBC driver Jar file must be available along with the information required to connect to the database. The utility command line tool files are located in the directory called utils, which is in the directory called scripts. The prpcUtils.properties file needs to be updated with the parameters required for the utility to be run. The database connection details in the common section are required for all utilities. Typically you want to use a user's access group to determine runtime context rather than use the default App Requestor's access group. Specify the operator ID and password in the pega.user.username and pega.user.password properties. There are two scripts available: prpcUtils.bat for Windows Server and prpcUtils.sh for Unix derivatives. The command to run a utility looks like this (Unix is shown here): ./prpcUtils.sh <utility> [--driverClass classname] [--driverJAR jarfile] [--dbType name] [--dbURL jdbc_url] [--dbUser username] [--dbPassword password] Where the utility parameter is mandatory and can be one of the following: importPegaArchive - imports an archive file importCodeArchive - imports code instances importAppBundle - imports an application bundle export - exports an archive exportRAP - exports a Rule-Admin-Product (RAP) scanInvalidRules - fetches all invalid rules present in an application runagent - starts an agent The database parameters are only needed if they are not provided in the prpcUtils.properties file. If the parameters are provided they override the ones specified in the file. After supplying these parameters, the utility starts running an ANT script, which performs the required actions based on the settings in the prpcUtils.properties file. 
The ANT script is defined in the file called prpcUtils.xml in the same directory. The script puts the generated logs in a directory called logs in the scripts directory. Each utility has its own options in the prpcUtils.properties file. Let's start by having a look at the import tool. The Import Tool Each of the three import commands has its own options in the prpcUtils.properties file. All import commands require the full path for the file to be imported to be specified. The importPegaArchive command is used to import archive files, typically created from a product rule or the export landing page. 443 Three modes are supported for the import.mode property. install - does not update existing instances, but only imports new ones. A message is written in the log for each instance that already exists. import - updates existing instances and removes duplicates. hotfix - updates existing instances and removes duplicates only if the rules to be imported are newer than the existing ones. We can define if we want the import to fail on an error in the import.nofailonerror property. Never disable the inference engine. Specify how many records to import between each database commit in the import.commit.count property. Always leave the import.properties.file property empty. The importCodeArchive command is used to import code instances into the system. Leave the import.code.mode property set to its default value as the system determines which mode to use. Specify the codeset name and version in the import.codeset.name and import.codeset.version properties. Leave the import.codeset.patchdate property commented out, it is set by the system. The importAppBundle command is used to import application bundles into the system. An application bundle is an archive, similar to the archives that are produced by product rules or the export landing page. 
However, an application bundle contains an XML document known as the manifest that defines the order in which rulesets, rules, and other items in the bundle are imported. Application bundles are typically used to install a Pegasystems solution framework. The import.slow.install property can be used when there are issues with the database driver. We need to specify if we want to have a report generated in the import.report.mode property. Use the import.compile.libraries property to specify if imported libraries should be compiled or not. The Export Tool Both the export commands require the full path to the exported archive to be specified. If the exportRAP command is used, the only relevant property is the export.archive.productkey, which identifies a Rule-Admin-Product instance by its pzInsKey. Use the View XML option in the actions menu to get an XML representation of the product rule showing the pzInsKey. The rest of the properties in the export tool section apply to the export command. 445 It is mandatory that we specify the classes to be included in the export.classes.included property unless exporting a list of pzInsKeys. Enter classes to include separated by a comma. We must specify if we want descendants of the class included in the export.included.descendent property. The properties export.classes.excluded and export.excluded.descendent allow us to filter specific classes and the properties export.startVersion and export.endVersion allow us to specify the ruleset version range. Always leave the export.template.file property empty. We use the export.keys.file property if we want to export a list of pzInsKeys. Enter one pzInsKey per line and do not provide other properties such as export.classes.included when using this option. The export.ruleset.name property allows us to specify a ruleset name to include. In the last property we have the option to preserve the lock details on exported rules that are checked out. 
Use this only if the corresponding locked instance is moved. Product Migration Wizard Use the Product Migration Wizard The Migrate Product wizard lets us automatically archive, migrate, and import a product rule also called RAP (Rule-Admin-Product) to one or more destination systems. This can be very useful when, for example, moving a product from the development system to one or more test systems. The wizard eliminates the need to: Create the archive file on the source system. Upload the archive file into the target systems Log into each destination system and manually import the archive Select DesignerStudio > Application > Distribution > Migrate Product to start the wizard. First we need to select the name and version of the product we want to migrate. If the archive was already created and exists on the server we can select use existing file to eliminate the need to rebuild it. Next we need to specify the target systems. The default server context root is prweb, but can have been changed. It is possible to use HTTPS for the transfer. Click Next to continue to the next screen. 447 Enter the username and password needed to authenticate access to each of the target systems. Click Finish to submit the request and start the migration process. 1. The product archive file is created on the source system. It uses the following naming convention: product name _ version number - patchnumber.zip . In our case the file is called ProductToMigrate_01.01.01.zip since there is no patch number available. 2. A covered work item is created for each target system. The process then attempts to make a connection to each target system and places the archive file in its ServiceExport directory. 3. The target system returns a success message if the connection succeeds and the file successfully loads onto the server and is imported. The source system resolves the work item upon receiving the success message. 
Migration Failures If the connection fails or the file is not loaded due to a system error, the work item stays unresolved. 448 It is possible to either retry the connection or cancel it, which withdraws and resolves the work item. 449 Save Target Systems Rather than having to re-enter the host name, context root, and port number each time we submit a migration request, it is possible to create a list of saved target systems by creating an instance of the class Data-Admin-System-Targets. The list of saved target systems appears the next time the Migrate Product wizard starts. 450 Rulebase Compare Compare the Rulebase of Two Systems Use the Rulebase Compare wizard to identify differences in the rules present on two systems. For example, we can use the tool to confirm that a product was successfully migrated, or to identify the changes needed to synchronize two systems. Select DesignerStudio > System > Refactor >Rulesets and select Rulebase Compare to start the wizard. Enter the details of the target system to which we want to compare. 451 It is possible to make a comparison based on a product or rulesets. Verify the information and click next to start the comparison. 452 The wizard generates a report summarizing the actions we need to make on the target system to match the source system. Add - The rule appears on the source system but is missing on the target system. Delete - The rule is not on the source system but on the target system. Update - The rule has a different time stamp on the systems. Upgrades Upgrade Innovation Centers Before we commit to performing our own upgrade, it is worth mentioning the Upgrade Innovation Centers. These centers provide a service offering that focuses on upgrading existing systems and bringing them up to date with the latest features. We should first evaluate if leveraging one of these centers is in the best interest of our business. 
For the sake of this lesson, we'll assume we are not leveraging a UIC and will be doing the upgrade ourselves. Prior to starting an upgrade Before we can upgrade our system, we need to identify the version of our existing system, and the platform we are upgrading from. We also need to determine if we're going to be doing an in-place upgrade or a parallel upgrade. The recommended best practice is to perform an in-place upgrade when possible; however, some situations may require a parallel upgrade. We will describe the process for parallel upgrades a little later in this lesson. For now, let's focus on an in-place upgrade as it is the best practice. Upgrade guides Within the release media, and available on the PDN with the deployment guides, are two upgrade guides we can use to perform our upgrade: PRPC 7.1.X Upgrade Guide PRPC 7.1.X Upgrade Guide for DB2-z/OS The second one is specific for DB2-z/OS installations; all other installations will use the first upgrade guide. These guides should be reviewed prior to initializing any upgrade. Other considerations Wait, the upgrade guides aren't the only things we need to consider. Several other factors need to be evaluated before we can commit to upgrading a system. Is there a framework installed? Some PRPC frameworks may not yet be compatible with the newest version of PRPC. We cannot commit to upgrading a system until the framework is also ready to be upgraded. Are there multiple applications on the system? In some instances, multiple applications may be concurrently installed in a system. While not as prevalent in Production systems, unless we're supporting multi-tenant, this is a common occurrence in Development systems. We cannot commit to an in-place upgrade of a system unless all of the applications on system are ready to upgrade. Is there active development in progress? We want to plan our upgrades when there is sufficient time and resources to regression test our applications in the new system. 
This is to identify if any customizations done in the system are no longer compatible with the latest version of PRPC. In some businesses, they request to implement the upgrade at the same time as a new development release, but this often becomes problematic as we cannot attribute the issues that may arise to either the upgrade or the new release. Therefore we want to make sure our upgrades are done separately from any new development. Is the business ready for an upgrade? It is also important to identify if the business has the bandwidth for an upgrade. Most businesses go through cyclical seasons of high and low business. We want to ensure our upgrade coincides with one of the business's low seasons. For instance, we would not want to upgrade an accounting application during the critical tax season. Nor would it be a good idea to upgrade a call center just before a new highly anticipated product launch. Plan the upgrade When committing to an upgrade, we should ensure we have a plan in place that the whole team is following. Both development and the business need to be aware that an upgrade is being performed as well as commit resources to test and address any issues that might arise. Upgrades should always be performed in the lowest environments first, such as a sandbox or the development system, and then propagated through the environments similar to any other release. Run the guardrail reports Prior to implementing an upgrade, the guardrail reports should be run for all the applications on the server. These reports identify rules that do not follow the PRPC guardrails and as such might encounter issues during an upgrade. The best approach would be to have the development team address any of these items identified prior to the upgrade. This is not always possible as occasionally a specific business need requires the breaking of a guardrail. 
For those instances, careful notes should be taken on which rules are outside the guardrails, and how they have been implemented. This is so that the development team can specifically target these rules post upgrade and validate there are no recurring issues. Backup, backup, backup There is never a guarantee when we're dealing with changing an underlying system. Before any upgrade, we should always ensure we've taken a backup of both the database and the application server files. This shouldn't be new to us as we should be taking backups before any migration anyway. We just need to ensure we also back up our ear or war and any configuration files at the same time. The steps of an in-place upgrade When implementing an in-place upgrade, we should follow the published upgrade guide for this release. The guide will provide the details of what needs to be done to upgrade our system, but let's review the high level process now. First, we need to validate our system will be compatible with the upgraded version. As new PRPC versions are released, occasionally backwards compatibility with all versions of application servers or databases cannot be maintained. We should consult the platform support guide to ensure the new version can run on the existing environment. If not, we will need to do a parallel upgrade. Stop the existing applications from running. There are occasionally some pre-upgrade scripts or processes that will need to run. This is to prepare systems that may have gone through multiple upgrades and ensure they are in the expected state for our upgrade. The next step is to upgrade the database. This can either be done automatically, using the Installation and Upgrade Assistant (IUA), or manually by running the provided scripts. The best practice is to use the IUA for this task. After the database is updated, we then need to import the new rulebase. This can also be done automatically using the IUA or manually via scripts. 
Again, the best practice is to use the IUA. The next step is to deploy the new archives. We would first undeploy any existing war or ear files and then replace them with the new ear and war files. Follow the steps in the installation guides for deploying the ear or war. Once the archives are in place we can perform any configuration changes in the environment to support the new installation. If we upgraded using the manual scripts, we should first log into the system and run the Upgrade Wizard. This process ensures the additional steps necessary, such as updating rule types have been completed. At this point, we should be ready for addressing our applications. Our current applications have all been created based on the previous version of PRPC. The development team should lock and roll their Rulesets to a new version prior to continuing any new development so that it is easily identifiable which version of the application is compatible with this upgraded version of PRPC. During this process, they should ensure their application rule is also updated to being built on the latest version of the PRPC rulesets. Next, we work with the development team to run the upgrade tools available in the designer studio. These tools attempt to automatically update the existing application to take advantage of new features, such as an updated CSS or to point a deprecated rule at its replacement. The system should now be ready to be validated. Validating an upgrade is covered later in this lesson. The steps of a parallel upgrade When necessary, especially in the case of multiple applications, we may need to take a parallel upgrade strategy. This process leaves the existing system intact, and implements the upgrade in a parallel system. Instead of following the upgrade guides for our system, a parallel upgrade requires us to install a new instance of PRPC using the installation guide. The lesson on new installations covers this process. 
After the new system is installed Once our new system has been installed and verified, we need to start migrating our rulebase and, in the case of production, our cases. First we need to implement any customizations that have been done in the existing system into the new system, such as encryption, additional database tables, JNDI connections, certificates for SOAP and other connections, etc... Next we migrate the application, just like we would for promotion through the environments. We can then install the application package into the new instance we've just created. We will also need to migrate the data records, such as operator ids, business calendars, organization data, etc... These can be packaged and migrated just like we did for the application. If this is a production system, we should migrate the cases from the existing system into the new instance. It is a best practice to also perform this migration of cases in one of the earlier environments, such as Preproduction or QA, to validate the process includes all the necessary case data. In most instances we should be ready to begin validating our installation. Occasionally, the lead system architect (LSA) may have identified additional steps necessary for this specific implementation. Work with the development team to ensure there are no additional steps required. Once the development and business teams have validated the new system, we can allow the users access to the new system. This can be done by either: o Updating the DNS servers so that the new server now receives all request for the previous URL (preferred) o Updating the links the users use to access the system to point to the new URL. Validating an upgrade Whether we did an in-place upgrade or a parallel upgrade, we need to validate the upgrade was successful. We do this by having the development/QA team run their suite of regression tests against the new instance. What to do if there's an issue? 
In most cases, if an issue arises, it is due to the development team's customization of a rule that was outside the guardrails, as we've identified before our upgrade. The development team will now need to address the customization and determine what changes will need to occur in order to fix the issue. The second most common issue with an upgrade is attempting to upgrade a PRPC system without upgrading the associated framework. This can come from staggering both upgrades. It is important to upgrade both PRPC and any installed frameworks in one shot. Failure to do so can potentially corrupt any existing cases. Rarely, an issue occurs that cannot be immediately fixed. In these cases, we should first roll back to the previous version. A careful analysis of what went wrong will then need to take place before we commit to implementing the upgrade again. Monitoring and Diagnosing Plan Performance Testing in Development Systems Pega 7 offers several tools that help us to validate the application we are building meets the desired performance guidelines. It is highly recommended that we use these tools periodically to ensure the application is performing well. After implementing any major functionality (user story), developers should perform the following steps to test application performance. 1. View Guardrail reports to verify that the application does not have any warnings. 2. Run Tracer with the performance settings enabled and check to see if there are any fixes we should make to improve the application. 3. Run through the entire application by creating a new case, navigating through all screens and then resolving the case. Then use the My Alerts tool to check the alerts that are captured in the current session. 4. Run PAL and then take readings for each interaction. Before looking at each of these steps in detail, these are some points to consider during performance testing on development systems. 1. When looking at PAL readings, focus on readings that provide Counts. 
Counts are always accurate and hence more reliable than the timers. If we repeat the same test again and again, the counts (for example, Total Number of rules executed) always remain the same while the timers (for example, Total Elapsed time for the reading) might vary between tests. 2. Focus on Counts in the Alert logs as well. When looking at alert logs, times may not make sense when similar testing is done in production systems due to differences in the infrastructure, data volume, concurrent users, and so on. In addition to counts, pay close attention to all alerts caused by database interactions. This might suggest where to define indexes, tune queries and so on. 3. Lesser is better: Pega 7 offers a wide variety of features, for example, we can use SQL functions in report definitions to filter results or present data in a specific format, we can sort on any number of columns, we can query from an unexposed column. It is always important to make sure the feature is required and use it only when it's appropriate and required because they do impact performance. Be sure to use them judiciously. 4. Testing early helps to address issues quicker. Use tracer, look at alert logs, and take PAL readings frequently. These are as important as creating applications to meet the requirements. During the development phase, it is very easy to pay more attention to building the rules and ignore testing. Not testing performance periodically impacts delivery if the performance is not meeting expectations. 5. When saving rules, pay close attention to the rule form to make sure there are no warning messages. These warning messages do appear in guardrail reports, but it is mandatory that all developers make sure that there are no warning messages in the rules they write. At the minimum, write a justification as to why this warning cannot be avoided. This provides some context when the lead developer runs these reports. 6. 
PRPC is deployed in an application server and uses a database server to read and write cases, read rules, report from database tables and so on. Make sure JVM settings are configured in the application server and appropriate database indexes are defined. Let's take a look at various tools that developers can use while testing a Pega 7 application. 1. Clipboard: The clipboard is primarily used for application debugging to check if the properties are getting accurate values, the page names are accurate and so on. The clipboard can also be used as a performance testing tool by checking the page size. The tools menu provides two options - Analyze and Collect Details. When we select Analyze a popup window opens displaying a table that contains information about all pages in that specific thread as well as the requestor. The most important thing to check is the page size and the INFO column for all pages returning a page list. The Collect Details option (when enabled) shows details on all pages including the ones that are deleted. After enabling Collect Details, click Analyze to see the details. Collect Details is disabled by default, and can be enabled to see which process created a page that is not expected to be there in the first place. Enabling this flag shows a new column called CREATION STACK which shows the rule that created this page and which one deleted a page. The flag should not be enabled unless we need to look at these details. Guardrail Reports: Guardrail reports play a critical role in ensuring applications are built conforming to the best practices. Pega 7 offers guardrail reports that make a developer's life extremely easy, it scans all the rules built in the entire application and provides a report with the list of rules that do not adhere to the recommended best practices. Pega 7 also provides a compliance score report which gives an indication of how many rules are not complying with the recommended practices. 
As we can see, we can apply filters to run through a specific sprint, and we can look at the number of alerts generated by the system and how it increases over the lifecycle of the project. This report can be exported and can also be scheduled to be delivered to a set of users automatically. We can look at the list of rules with warnings by clicking the number 82 (above), or we can use additional reports on the landing page to drill down to the next level. The compliance details report adds a little more context and analysis in terms of when to resolve these issues, who introduced these issues and so on. The warning summary report provides the warnings grouped by rule categories and also the severity. The warning summary also offers the ability to see all reports even if they are justified by developers. Having this as a filter enables the lead developer to focus on unjustified warnings before addressing the justified warnings. Expanding any of the rows displays the list of all warnings for a specific rule type, which is identical to the warning details report, which displays the list of all warnings along with additional filtering capabilities. Tracer: Tracer is similar to the Clipboard in that it is mostly used as a debugging tool while stepping through the case. However, Tracer in recent releases has been enhanced significantly so we can use it for analyzing performance as well. Shortly, we will learn about the various tracer settings and additional tools that are useful when interpreting tracer output. 4. PAL: The Performance Analyzer is extremely important when testing the application in development and using this tool helps us to identify potential performance bottlenecks that can occur. 5. Alert Logs: Alerts are captured in log files and as we mentioned earlier, we do not need to pay too much attention to alerts that are written when the time threshold is exceeded. 
Tracer Using Tracer to test performance Tracer is an extremely useful tool for application debugging. It traces the session in real-time and we can use this when unit testing the application to see which rules are getting executed, the value of the properties before and after the rule gets executed and the exact step where the exception occurred. Tracer has been enhanced to report on performance statistics and we recommend that developers use this extensively in the development phase of the project. The tracer settings dialog displays options to trace all performance related information. Let us talk about a few important settings. Abbreviate Events - Enable this setting when using tracer to debug performance. It limits the page output and helps Tracer to run faster. Interaction - This flag enables PAL statistics and groups the tracer output by interaction in the Tracer viewer. Tracer captures output directly in the tracer window. Viewing the output in this window is helpful when there are not too many steps displayed. When it goes beyond a few hundred lines, viewing the output becomes extremely difficult. Use the tracer viewer tool to view the output instead. Set the Max Trace Events to Display to 0 so that the tracer does not write any events. After the tracer output is disabled, continue testing normally and when all tests are completed, click the Save icon at the top, which prompts you to save the file on the local system. Use the Tracer Viewer to interpret the results. The following article on the PDN explains this tool in detail. The Tracer Viewer allows us to look at the key events by highlighting them in red. We can expand the branch and drill down to the actual step which causes the issue. On development systems, it is often required to trace sessions belonging to other users. The Tracer offers the ability to pick another operator who is logged in to the system. 
When Remote Tracer is clicked, it displays all users who are currently logged in to the system. Selecting a user from the list starts a tracing session for that user. It is still possible to modify the tracer settings and download the tracer output for other user's session. The Remote Tracer link in Designer Studio allows us to trace operators logging in to that specific node. If the development system uses a multi-node clustered setup, then we should use the System Management Application (SMA) to connect to that node and then trace user sessions. Running tracer from the SMA is helpful in development systems though it may be prudent to use security. Pega 7 can help to achieve this by configuring the PegaDiagnosticUser role in web.xml. PAL The most important tool a developer should run in development systems is the Performance Analyzer (PAL). Though Pega collects the performance statistics at all times, we need to use PAL tool and take readings to get this data. Taking a PAL Reading When we take a reading, it displays as a DELTA (meaning the incremental difference in the statistics from the previous step). The first step after starting PAL is to check the Int # (Interaction number), this should be zero when taking a fresh reading, click reset to delete all the interactions already captured. This number is always higher since PAL readings are collected in the background. After setting it to zero, perform the actions for which we need to conduct performance testing. Once done, click Add Reading, this displays another row of reading. The DELTA row displays key statistics and we should focus only on the counts such as Rule Count, Activity Counts and total Bytes. Clicking DELTA displays another window listing additional counters and other performance statistics. 
In Development, pay close attention to counters in the Rule Execution Counts, which provides information such as how many rules are executed and also the distribution by rule types (number of data transforms, declarative rules, activities, when rules and so on). The other section we should look at is the Database Access Counts which provides information such as how many rows are being read from the BLOB(storage stream), how much data is being read from the BLOB(storage stream) and so on. 468 Tips on PAL Testing: 1. When testing applications, take readings for each screen. In the reading, look for factors such as number of interactions on that single screen (server trips from browser), number of bytes read and written, DB I/O count, number of rules, number of declaratives, clipboard size estimates, number of declarative indexes written. 2. Look for outliers such as 20 server trips, 8 MB of data read from BLOB, 200 rules being executed in that interaction. When we notice anything abnormal, investigate to see the details. Run Tracer to specifically identify the 200 rules, enable the DB query in Tracer to see the query and identify which column is read from the BLOB. 3. Look for the growth pattern: When testing in development systems, repeat the performance testing periodically. Watch for statistics such as the number of rows being returned from the DB — is it growing based on the number of cases being created? The counts should increase with more cases in the beginning but watch out if it keeps rising after an extended period of time. This may be a sign that there is an issue. 469 4. Tracer and PAL should help us in narrowing down the cause of the performance issue in most cases. DB tracer and the Performance Profiler can be used in special cases when Tracer cannot supplement all the details. 
The DB Tracer is useful to generate a stack trace to identify the hidden problem while Performance Profiler can be used in tracing inline when rules (when rules not explicitly referenced). 470 Viewing Global PAL Data PAL as we know runs in the background, but how do we leverage this information other than taking readings whenever we would like to? We can access the Performance Details screen using System > Performance > My Performance Details. This shows the list of sessions available for that requestor. We can pick a different User ID in the User ID field to look at the performance details of that user. This list shows only the information for the current day since the passivation is set to 1 day. The performance details can go back only until when passivated data is stored. In addition to this we can also use the Log-Usage reports through the SMA. Refer to the Monitoring and Diagnosing Production systems lesson for more information on the Log-Usage reports. Performance Alerts Performance Alerts Alert messages are captured in log files and can also be viewed in both the Tracer output and PAL data. Developers, however, must view the Alerts using the Alerts link in the Designer Studio. Clicking the Alerts link opens a window that displays all the alerts that happened during that session. If we need more information, click All My Sessions to see additional alerts. All My Sessions displays all the alerts on the current and all other passivated sessions for that requestor (typically set as 1 day). We can customize the data by clicking the Options link and in the additional fields that appear we can modify the Filter by field to display alerts on a different developer's session. When viewing alert logs, identify alerts that do not involve time; in some cases the time alert is caused by the other alert. Notice that there are three different alerts on Interaction ID 18 and the first one is a summary alert which is usually followed by additional alerts. 
The second one is a time alert. Let's look at the third alert (BLOB Size Read). Expanding the alert, we can see additional details such as the Alert ID, Line (explaining the cause of the alert) and PAL stats. Use the PDN to search on the Alert ID (PEGA0039) to get additional details about how to fix it. In this case the following information appears. The pzInskey, in this case, indicates an attachment is read from the BLOB and it exceeds the threshold value. Alerts as we can see are extremely useful in narrowing down and resolving issues. PegaRULES Log Analyzer (PLA) Pega ships another tool, the PLA which is effective in listing all alerts by importing a log file. If we need to debug alerts that happened on sessions that are past the passivated timeline, we must use PLA to import the alert log files and identify how frequently these alerts occur and then determine the priority in which to fix them. Repeated occurrences of alerts can cause performance implications and the PAL data that is included in the alert helps us to identify the key statistics. After importing the log file in PLA, the alerts summary displays various alerts seen and how frequently they occur. The Alert Count by Application report which groups based on applications is quite useful in implementations using various PRPC applications. The following is the list of performance alerts that must be resolved if found in development systems. Most of the alert thresholds can be configured using dynamic system settings. We can adjust the values appropriately in development systems if we do not want to see too many alerts. Setting threshold values must be done cautiously since it might suppress displaying a potential performance issue.
Alert PEGA0004 - Quantity of data received by database query exceeds limit PEGA0016 - Cache reduced to target size PEGA0017 - Cache exceeds limit PEGA0018 - Number of PRThreads exceeds limit PEGA0019 - Long-running requestor detected PEGA0021 - Clipboard memory for declarative pages exceeds limit PEGA0024 - Time to load declarative network time exceeds limit PEGA0025 - Performing list with blob due to non-exposed columns PEGA0027 - Number of rows exceeds database list limit PEGA0028 - GC cannot reclaim memory from memory pools PEGA0029 - HTML stream size exceeds limit PEGA0030 - The number of requestors for the system exceeds limit PEGA0031 - Generated stream overwritten and not sent to client PEGA0033 - Database query length has exceeded a specified threshold PEGA0034 - The number of declare indexes from a single interaction exceeds a threshold PEGA0035 - A Page List property has a number of elements that exceed a threshold Category DB Bytes Read Cache Reduced Cache Force Reduced PRThreads Limit Long Requestor Time Declarative Page Memory Reading Blob Need Columns DB List Rows Memory Pool Collection HTML Stream Size Requestor Limit Stream Overwritten DB Query Length Declare Index Clipboard List Size 475 Alert Category PEGA0036 - PegaRULES engine intentionally shut down PRPC Shutdown PEGA0039 - The size of a BLOB column read exceeds a threshold Blob Size Read PEGA0040 - BLOB size written to the database exceeds a threshold Blob Size Written PEGA0041 - Work object written to the pr_other table Work Object PR_OTHER PEGA0042 - Packaging of database query has exceeded operation time threshold DB Query PEGA0043 - Queue waiting time is more than x for x times Asynchronous Declare Page PEGA0044 - Reached threshold limit for message ID: PEGA00XX, will send again after [Date] Throttle alert PEGA0045 - A new request has been submitted for a page without using the existing one ADP Duplicate Request PEGA0046 - Queue entry not yet started by the load activity ADP Queue Not 
Started PEGA0047 - Page copy time is more than the loader activity execution time for the data page ADP Copy Too Long PEGA0048 - Page copy time and waiting time are more than the loader activity execution time for the data page ADP Copy Wait Too Long PEGA0049 - Search status check alert. The alert identifies that there is a problem with the Lucene Index, and that search results may therefore not be accurate Search Status Check PEGA0050 - Lightweight list has been copied n times. Lightweight List Copy PEGA0052 - Wait time exceeded for the page ADP to load asynchronously ADP Load Wait Exceeded What should we do when alerts occur? Look in the PDN article on performance and security alerts and AES, and click on the alert IDs to open the corresponding PDN article for that alert. The article on each individual alert gives additional information on how to handle some of these alerts. Alerts are usually one of these types: Threshold Alerts: Alert messages written when a threshold value is exceeded; the threshold value can be represented in milliseconds, in bytes, as a whole number and so on. Most of the alerts belong to this category; fixing these alerts in development prevents issues from reoccurring in production. Look for alerts such as PEGA0021 and see if the application can handle these errors in one of these ways - expire the pages after their non-use, restrict the number of rows being returned, check if all the information stored in the data page is referenced and if we could remove some of it. Best Practice Alerts: Alert messages written when a best practice is violated. Note that alerts are not written on all best practice violations; the guardrail reports are still a primary mechanism to identify rules violating best practices. For example, PEGA0050 which is an alert indicating that a rule is copying a clipboard page list to another page list in an inefficient manner and in Pega 7.1, it is not necessary. Read the PDN article on this alert (lightweight list has been copied n times) for more details on this alert.
PEGA0025 is another alert which is written if the report is reading from unexposed columns; these are also shown in guardrail reports. Observation Alerts: Alert messages written based on what happened or what is being noticed. For example, PEGA0028, which is an alert generated when the Garbage Collection (GC) process cannot reclaim enough memory to remedy the performance impact. Event Alerts: Alert messages to indicate an event; this may or may not be important. For example, PEGA0049 is written when there is an issue in the Lucene server. This might mean that the search will be erroneous until it is corrected, there may be a problem in the search settings or it requires reindexing to fix the problem. Log Files Log files and Logging Levels Log files in addition to alert logs are viewed primarily to look at exceptions including the Stack Trace. We use PLA to import the log file and then we can look at the list of all exceptions and frequently occurring exceptions. The system summary page provides the summary in terms of counts of errors, warnings, exceptions and Info messages. Out of this the exceptions get more attention and are listed in the PLA output. Use the Export to Excel option to view the log messages in a spreadsheet which gives the option to use various filters. In addition to the exceptions, log files can be used to look at how much free memory is available; this is extremely useful to see how the memory consumption varies with more users accessing the system and if there is anything abnormal to note there. For debugging purposes, we can configure rules such as activities and flows to write to log files. In an activity, use the Log-Message method which can write a specific message to the log file. By default in development systems we must have the Logging level enabled as INFO so all messages including INFO, DEBUG, WARN and ERROR get recorded. Other Tools 1.
PegaDiffer Tool: Consider this scenario: When testing a Pega 7 application, we notice that specific functionality is not working and we were pretty sure it worked earlier, so something might have changed recently which is causing this issue. Sounds too familiar? There is a pretty easy way to track down the issue. Pega offers a PegaDiffer tool, which can be downloaded from the Exchange (components section) on the PDN. There is a pretty good user guide for this tool available on the PDN. This tool allows us to compare Tracer outputs - we can test the application on the current ruleset version and also create another access group to test the previous ruleset version. While testing the application, we should run Tracer to get the tracer output and then compare the tracer output to see exactly what changed in between. 2. DB Tracer: DB Tracer can only be run when we need to get additional details while tracing database issues. Tracer must always be used to check the database issues but when additional information is needed, we can use DB Tracer. DB Tracer can be started in the performance landing page System > Performance > Database Trace. On the Performance tab, click Play to start collecting database trace. Enable the Trace options to only trace what is required. 3. Performance Profiler: Profiler is useful to identify the list of rules that are being executed. It is useful in terms of explaining the CPU time for each step in an activity rule. In addition to activity rules, it also traces when rules and data transforms. Profiler traces the whole system and impacts performance severely. So we should stop running Profiler immediately after troubleshooting. Tracer is useful in tracking all these rules, so usage of the Profiler is extremely rare unless we need to trace inline whens. Performance Profiler can be started from the Performance landing page.
Diagnosing Production Performance Testing Tools When it comes to debugging production applications, most of the issues related to application performance must be solved, however there may be some lingering issues which will require us to reproduce the same issue in other environments if it requires an application fix. Let's take a look at tools that a developer can use while testing a PRPC application. 1. Pega Predictive Diagnostic Cloud (PDC): When it comes to diagnosing or proactively monitoring production issues, Pega offers two separately licensed products. Pega PDC is extremely useful in proactively monitoring nodes and sending reports to concerned users. PDC is hosted on the cloud. Autonomic Event Services (AES) is the other tool which is similar to PDC, except AES is an on premise installed tool. PegaRULES Log Analyzer (PLA) is a lightweight tool and can be installed by the lead developer on their own system. It can be downloaded from the PDN for free. 2. System Reports: Export the results of pr_perf_stats table so it can be used for offline analysis in a non-production environment. 3. System Management application (SMA): SMA is shipped with Pega 7 and can be deployed as an EAR or WAR application. SMA can be deployed in any Pega 7 server and can be used to monitor multiple Pega 7 servers. SMA can be configured to connect to the production servers if required. Pega 7 supports security using the PegaDiagnosticUser Role so access is restricted. Using SMA against a live production server comes with a huge performance hit, so we should be cognizant of the reports we are running from SMA. a. Log-Usage Reports: Useful tool that is launched from SMA to check various PAL statistics across the whole system. b. Log files: These files can be accessed from the SMA if access is enabled. SMA also allows setting logging levels on specific loggers by selecting Logging Level Settings under the Logging and Tracing category. 
This opens a screen that allows us to set the logging levels. We can select a logger and then check the current logging level for that logger. We can set it to a different level if we need some additional debugging. We also have the option to reset all the loggers to their initial level, which must be done in a production system after the issue is resolved. c. Tracer: Tracer is useful to trace any requestor session, which can be done from the Requestor Management Page. However, it is important to know that tracer adversely impacts performance, so it should be run with caution. We should attempt to debug using other tools and try to reproduce the issue in another environment before running tracer. When running tracer, we should control what is being traced. Using the settings icon, enable only the rules that we want to trace, enable abbreviate events and disable logging on the screen. The tracer output should be saved and interpreted using the Tracer Viewer. We can also use PegaDiffer which is extremely useful in production systems to compare two different systems using the tracer output files taken from those systems. d. Clipboard: The clipboard can be launched from SMA to look up requestor sessions. The clipboard can be used to check the size of different pages in the requestor session or in the global session. e. Performance Details and Profiling: We can run PAL or performance profiler on a specific requestor session from the Requestor Management Page. f. DB Tracer: DB Tracer can be run on any session to debug DB issues. DB tracer is expensive in terms of memory and other resource consumption but can be relied on to look at debugging database issues. Using DB tracer helps to display the stack traces to find the hidden problem. The Requestor Management Page in SMA provides access to the tools listed above. Using SMA we can also run the Global DB tracer to trace DB sessions across the whole node.
Similar to Tracer, we should enable only the options that we are interested since it is being run on a production system. Performance Debugging in Production Environments Testing performance in a production environment, can be done; 1. After the issue occurs, to diagnose the cause of the issue and fix it quickly. 2. By constantly monitoring the system to identify the potential candidates which might cause performance issues. Pega Predictive Diagnostic Cloud (PDC), is an extremely useful monitoring tool that can be configured to receive alerts and exceptions from Pega 7 systems. Alert and exceptions can also be interpreted easily using the PegaRULES Log Analyzer (PLA). If a PDC license was not purchased, you will need to use the PLA. Another tool that is useful in on a production system is the Log-Usage reports. These reports provide hourly statistics showing time spent, CPU usage, memory, network and other key performance statistics. We will learn more about this later in this lesson. Time becomes interesting in production and receives significant importance. If performance testing is always performed in development, most of the issues relating to counts should be addressed. Alert logs provide clues to where the problem is potentially in the system. Alerts are usually one of these types: Threshold Alerts: Threshold Alerts are written when the threshold value such as elapsed time, data size that is being read or written exceeds the default threshold value. For example, if the log file has a lot of PEGA0026 alerts, then we need to add more database connections in the connection pool of the application server. If there are lots of PEGA0037 alerts, then it might suggest that we run the static assembler or maybe the database connection is slow, in such cases other database alerts will also occur. Observation Alerts: Alert messages written based on what happened or what is being noticed. 
For example, PEGA0028 which is an alert generated when the Garbage Collection (GC) process cannot reclaim enough memory to remedy the performance impact. Occurrence of this alert indicates that Garbage Collection statistics need to be collected on the node where this occurs. Event Alerts: Alert messages indicate an event, such as the server restarts, agent disabled, cache disabled and so on. For example, PEGA0008 is written when the server is started. If this is in the alert log file then it might be a problem if there is no scheduled restart. It might mean that there is another alert PEGA0036 indicating the server is shut down. This alert in a development system may not be that critical because the servers may go down more frequently but in a production system this could be a problem. Summary Alerts: Alert messages belonging to this category are usually a consequence of other alerts. This alert can be handled only by addressing the other alerts. Quite often this alert is not indicative of any one particular long running process but instead indicates there are other alerts associated with this to cause this alert. PEGA0001 is a good example. This is the most commonly observed alert and when this alert is thrown we need to identify other alerts that are thrown along with this alert. We can do this by looking at the interaction ID field. Check all the performance and security alerts; clicking on them opens the corresponding PDN article for that alert. The article on each individual alert also provides information about how to resolve these alerts. Alerts can be seen in multiple places and in production it is usually not possible to log in to Designer Studio and click the Alerts icon to open up the My Alerts window. Therefore, request a copy of the alert logs and open it using PLA. Pega Diagnostic Cloud Pega PDC, as the name suggests, is hosted on the Cloud, and is managed by the Pega Cloud team for all customers who license this product.
After acquiring the license, administrators or lead developers need to configure the Pega 7 Server to push alerts and health messages to the Predictive Diagnostic Cloud. A single Pega PDC is usually capable of monitoring several Pega 7 servers that are running on different environments (development, staging or production). Pega PDC is accessed from a browser by entering the URL which is typically. The xxxx at the end of the URL varies by customer and contains a set of alphanumeric characters. After logging in, users see the PDCManager portal. The Portal provides varied sets of dashboard reports that are extremely useful in identifying the issues. There are also adhoc reports that are available in the reporting tab on the portal. The landing page menu offers the ability to automatically send the scorecard reports by email. This can be configured using the landing page menu in the portal. On the landing page, we can configure email specific reports by entering the email IDs of users who can assist in debugging the issue. The manage subscriptions tab can be used to setup the users to subscribe to these reports so that they are delivered on a schedule. 488 PDC is useful not only in identifying the top 10 issues but also in providing recommendations about how to fix these issues. The Top 10 Performance Items report identifies the top 10 issues; each has a unique ID and is an action item case. The description field provides the recommendation and in some cases cause of the issue. Click the ID to see additional details. Notice that fixing these 10 issues provide a 67% performance improvement which is a big gain. It should be our goal to not just fix these top 10 issues but get the list closer to a zero. Some actions include assigning the action item to yourself , assigning it to someone else or resolving the action item. 
In some cases other than the DB alerts, we need to look at the Analysis tab which lists all alert occurrences and the associated data such as the rules, PAL data, frequency of occurrence, node where the alert occurs and so on. Similar to fixing the top 10 performance issues, it is highly recommended that we fix the top 10 exceptions report which appears right below the top 10 performance report. Fixing exceptions in most cases improves application performance, so it makes a lot of sense to fix them immediately. The Exception report parses the system log and displays the stack trace. It is quite helpful to use Pega PDC rather than reading the log file for locating stack traces. Another important report can be found in the Top Offenders tab on the dashboard. System Activity Comparison Report This report provides a summary in terms of how the system is performing compared to the previous week. PDC automatically highlights the important metrics such as the alert counts, exception counts and the average response times. This report also indicates that the number of alerts has increased considerably this week compared to the previous week. In addition to the dashboard reports, PDC presents additional reports. Let's look at some of them. For some reports the data can be filtered by selecting the start and end date. The All Alerts report provides a comprehensive list of all alerts but if we are monitoring the system constantly, the Recent Alerts report should be the safe bet to see what is going on. Using the reports that are listed in the With Date Range category, we can check how these alerts are distributed by day and by node. If a specific node is reported then these reports are useful in checking the alerts on that node. If a specific time is reported then we can use the day to figure out the distribution. Lead developers should use the Action Item Reports category to monitor the progress of the action items. Use various filters to drill down to a specific action item.
If the customer has various applications onsite then the Enterprise Charts and Enterprise Reports category can be used to find the specifics of each application. There are also some relevant standard reports that are available to review. The reports can also be run for different Pega 7 servers by selecting the system before running the reports. PegaRULES Log Analyzer PegaRULES Log Analyzer (PLA) PLA is used if Pega PDC licensing is not purchased. PLA can import system logs, alert logs and GC logs. The log files can be downloaded directly from the Logs landing page in the Designer Studio by clicking System > Operations > Logs. In production systems, it is usually protected using a password typically set in the PegaDiagnosticUser Role. If developers cannot log in to the Designer Studio (which is true in most production systems) then they can use the System Management Application (SMA) which allows downloading the log files from the Logging and tracing section. Again we can set up role based security and the files can be downloaded in zip or in text format. The third option is to download directly by logging in to the application server. The location of the log file is usually configured in the prlogging.xml that is defined as part of the prweb.war in case of war implementations or the prresources.jar. An administrator can download the alert files as a text or as a zip and then email them to developers for offline analysis. The developers can use PLA which parses the alert log files and provides some abilities to categorize the results. PLA data can be exported to Excel which can be handed over to the development team, but we can look at the list of all alerts in the log formatted in the PAL Alerts screen. The items in blue under the Msg ID column are hyperlinks to more information about that alert on the PDN. PAL statistics are collected in production systems; however it is not feasible for developers to run the Performance Tool in production.
So, how do we leverage the Performance details? This is where Log-Usage reports come into use. Log-Usage reports can be viewed in the SMA by clicking the Logging and Tracing category > Garbage Collector and Log Usage. We can click the node ID and use View to look at the results on this page or use the CSV button to export the data as a CSV file for offline analysis. The Log-Usage statistics report in the SMA displays key statistics which can help in looking at the time elapsed, rule counts and bytes being read from the BLOB and so on. 495 Log usage can also be used to get the hourly statistics which helps to narrow down when the issue occurred. This might be useful for cases when we need to determine what exactly happened at a specific time. In a customer application, the system performed badly at a specific time every single day. When looking into it further, they found that this behavior happened because a system agent was starting at that time and it usually processed a lot of cases as it is run only once a day. Administrators who have access in production can also export the My Performance details report that can be accessed from the System > Performance > My Performance Details page. PAL statistics are collected in a database table named pr_Perf_Stats; DBAs or application administrators can package all the records in that table and import it into another instance for developers to debug. In addition, Pega 7 ships with several reports in the Log-Usage class (that is mapped to the pr_perf_stats table). Take a look at these reports and if required create customized versions of these reports to which you can subscribe by email. System Tuning Application Server Tuning Pega 7 is a JEE application that is hosted on the application server. It is crucial that the JVM arguments are configured so that Pega applications use the JVM memory appropriately.
Properly setting these arguments includes various factors and getting this correct in the development stage helps to ensure the system scales and performs well in production. When tuning JVMs for performance the main thing to consider is how to avoid wasting memory and draining the server's power to process requests. Certain automatic JVM processes, such as Garbage Collection (GC) and memory reallocation, can chew through memory if they occur more frequently than necessary. VM Heap Size Time spent in Garbage Collection should be less than 3% of the execution time. The goal of tuning the heap size is to minimize the time that the JVM spends doing Garbage Collection while maximizing the number of clients that the Application Server can handle at a given time. It can be tricky to determine the most balanced configuration. When setting the JVM, we need to make sure the heap size is set correctly; use -Xms and -Xmx to set the minimum and maximum heap size. The recommended setting from Pega is to set both of them at 4096m. Set it bigger if the application requires supporting more users per JVM. When using Oracle JVMs, there are other parameters such as PermSize and NewSize that are configured to set additional heap size allocations. Check with a JEE expert or recommendations from IBM or Oracle for specific instructions. The heap sizes should be set to values such that the maximum amount of memory used by the VM does not exceed the amount of physical RAM available. If this value is exceeded, the OS starts paging and performance degrades significantly. The VM always uses more memory than the heap size. The memory required for internal VM functionality, native libraries outside of the VM, and permanent generation memory (for the Oracle JVM only: the memory required to store classes and methods) is allocated in addition to the heap size settings. The heap has two areas - nursery (young) and tenured (old) generation. Every clipboard page created by the application is allocated in the nursery space.
If that clipboard page has been active for a long time, then it is moved into the tenured space. The nursery scavenger collection runs 20 to 50 times more than the Concurrent Mark Sweep (tenured generation) collector. The goal is to have the nursery big enough so most of the objects get collected in the nursery. Garbage Collection Capture GC by appending -verbose:gc to the JVM arguments to collect garbage collection statistics. Use -Xloggc in case of Sun JVMs and -Xverbosegclog in case of IBM JVMs to capture the GC output in a log file. IBM JVMs use the Mark Sweep Collector; for Pega applications, set -Xgcpolicy:gencon since the gencon (Generational and Concurrent) policy is optimized for highly transactional workloads. Gencon GC considerably reduces the time spent on garbage collection by reducing the wait times. For Oracle JVMs, we use -XX:+UseConcMarkSweepGC, and there are additional settings to be configured such as TargetSurvivorRatio, policy for nursery objects GC, and so on. What tools are good to analyze the Garbage Collection results? 1. PLA: Garbage Collection logs can be studied using the Pega supplied tools such as PLA (PegaRULES Log Analyzer). Import the GC log in PLA and look for the GC summary in the Manage Data tab. It displays the % of GC Time and like we saw earlier, it should be less than 3%. PLA also offers GC summary reports. 2. GC reports in SMA: We can run adhoc GC reports from SMA in Logging and Tracing > Garbage Collector and Log Usage. We can import the GC log directly in the SMA to look at certain key statistics. 501 Pega recommends using a separate JVM for agents, especially in multi-node systems. On an agent-specific JVM, use the following policy for Garbage Collection: -Xgcpolicy:optthruput since the agent sessions are short lived and setting Gencon or concurrent mark sweep is expensive in this case. 3.
PMAT: IBM PMAT (Pattern Modeling and Analysis Tool) can be downloaded from the IBM developer works community and used for looking up GC reports. To analyze GC, we need to import the GC log file. PMAT parses the IBM verbose GC trace and provides a comprehensive analysis of the Java heap usage by displaying them in charts. It then recommends key configurations by first executing a diagnosis engine and then employing a pattern modeling algorithm in order to make recommendations to optimize the Java heap usage for a given JVM cycle. If there are any errors related to the Java heap exhaustion or fragmentation in the verbose GC trace, PMAT can diagnose the root cause of failures. PMAT provides rich chart features that graphically display the Java heap usage. PMAT offers various statistics, but one of the key statistics to be aware of (Total Garbage Collection Duration) can be found by clicking the Statistics icon on the top. In this example it spent approximately 0.04% and this number should always be less than 3%. 4. HPJMeter: When using Oracle JVMs, HPJMeter is a pretty useful tool in interpreting GC log information. There are several other tools available in market such as JConsole and it is up to the discretion of the person who is responsible in tuning the performance. Application Server Tuning Tips Unlike some of the other performance areas (which should be checked periodically as the application is being built), tuning for memory usage should be done after the application is completed. Begin the tuning process by enabling verboseGC. Note your start time and then start the application and run it with several users for some amount of time, making sure to sample all the processing. After all the features have been exercised, close the application, noting the stop time, and review the GC log. 
There are a number of application issues that may be highlighted by the data in the verboseGC log, including: High volume of garbage Quick allocation failures The same object being loaded repeatedly Heap size not reducing with user reductions When any of these issues occur, it is important to be able to know not only what the JVM is doing, but also what the application is doing. Since it is difficult to tell application processing directly from the verboseGC log, the SMA tool can show the verboseGC log information (Garbage Collection perspective) juxtaposed with the Log Trace information (application perspective). This combined view of the statistics shows an hourly breakdown of the activity in the system, and allows us to see what the system is doing against what the JVM is doing, and how they relate. If there is a Garbage Collection problem, and too much garbage is being generated, we need to know if this is directly or indirectly related to the activities being run. Is a spike in Garbage Collection paralleled by a spike in processing? Is the growth across the two areas consistent - in other words are more activities being run because that means more garbage is being created— or is one growing faster than the other? Use this tool to trace Garbage Collection anomalies to actions in the application. Volume of Garbage primarily focusses on two factors, bytes collected in each collection and time spent by system in collecting the garbage (should not be more than 3%). If the number of bytes collected is large enough, check alert logs and system logs to see why the application is requesting so much data. Quick Allocation Failures - Check to see if the GC log file shows a series of allocation failures within a short period of time. Allocation failures can occur because the heap size is small or the objects are not released even if they are not used. 
Same Object Loaded Multiple Times - If the system is trying to load the exact same size object repeatedly, then there is something wrong in the application and we need to figure out which rule is getting loaded and why it is not getting cached. Heap Size - When tuning the JVM, check the heap size when users are logging in or logging off. Both should impact heap size; if the logoff does not trigger a reduction in the heap size there may be a potential memory leak. Memory Leak and its consequences Memory leaks may be negligible in some cases but when left alone, on a system with a lot of concurrent users working on applications that use a huge page size, it might trigger a bigger issue. Typically, this results in either decreased response times due to constant Garbage Collection or an out of memory exception if these objects cannot be removed. In addition to tuning the JVM, there are several recommendations from Pega; refer to the PDN article on tuning application servers for additional information. Alert Thresholds Performance Alerts help in ensuring the application delivers expected performance. However, the alert threshold values that are set by default may not apply in certain cases, which might cause an overload of alerts in log files. PEGA0001 (Browser Interaction) - Summary Alert The most frequent alert; the default threshold value set by Pega is 1000 milliseconds and can be modified to meet the expected time taken to render a screen, which is usually less than 3000 milliseconds. The threshold value can be modified by using Dynamic System Settings (DASS). We can define a new DASS instance with the setting value of prconfig/alerts/browser/interactionTimeThreshold/WarnMS and set the value in milliseconds. PEGA0004 (DB Bytes Read) - Threshold Alert This alert is written to the log file when the data received by the database query in a single interaction exceeds the threshold value. This alert can be enabled to behave either as a warning or as an error depending on the size.
There are two separate settings that can be modified using DASS: prconfig/alerts/database/interactionByteThreshold/warnMB for warnings and prconfig/alerts/database/interactionByteThreshold/errorMB for errors. By default, warnMB is set at 50 and it displays a warning message when the data exceeds 50 MB. However, errorMB is set to -1 because when errorMB is reached, it displays the error message along with the stack trace in the UI in addition to recording it in the log file. When errorMB is reached, it also stops processing the query. It becomes absolutely mandatory to set errorMB in production systems to prevent unbounded queries that can bring down the system. When this error occurs, we need to look at the database query (a report rule like the report definition, or activities using the Obj-Browse method) and check what is being queried. Typically in most cases we might be querying the entire record instead of what we really need. We can fix the error by doing one or more of the following: 1. Modify the query to return only the columns that are required. 2. If more than one row is returned, apply appropriate filter criteria to get only the results that will be used. 3. Also set the maximum number of rows that can be returned in the report rule. This is one of the five alerts that are marked as Critical, implying that it needs to be fixed before the application goes to production.
There are two settings that can be modified for this alert: prconfig/alerts/longrunningrequests/notifications - this is set at 3, which means the agent tries 3 times to delete the requestor before sending the alert; and prconfig/alerts/longrunningrequests/requesttime - this is set at 600, which is 10 minutes, and this is the time it waits before trying to delete the requestor again. PEGA0030 (Requestor Limit) - Threshold Alert This alert is written to the log file when the number of requestors logged on a single Pega server exceeds 200 (the default value). This alert helps us to decide how many servers we would need and if the load balancer is distributing the requests equally amongst all servers. This can be modified by creating a new DASS using the setting value of prconfig/alerts/requestors/count/threshold. Setting the alert thresholds to meet the business service levels prevents writing too many entries in Alert logs; similarly we need to set the System Logging Level to ERROR so that entries below that level (WARN, DEBUG and INFO) do not appear in the log files. Capturing all information in log files adds severe overhead in I/O in terms of writing these log files. Tuning Database Database plays a key role in the performance of applications built on the Pega platform. The system uses several caches to limit requesting data from the database for accessing rules; however database operations while creating, updating and resolving cases always play a key role in application performance. Pega tables use a BLOB column, which provides flexibility in terms of defining structures but comes with a cost when extracting information from the BLOB. Optimizing scalar properties creates additional columns, and optimizing page list properties creates declarative index tables.
Optimizing too many properties means that each database row becomes big and the performance is impacted with additional processing and space overhead: The more exposed columns, and the more native database indexes we have, the more expensive each read, update, or delete operation becomes. When it comes to tuning databases, involving DBA's is critical, however Pega offer several tools to help in identify issues when it occurs. One of the most important tools is the Alert log which highlights several DB alerts such as: Alert Category PEGA0002 - Commit operation time exceeds limit DB Commit Time PEGA0003 - Rollback operation time exceeds limit DB Rollback Time PEGA0004 - Quantity of data received by database query exceeds limit DB Bytes Read PEGA0005 - Query time exceeds limit DB Time PEGA0025 - Performing list with blob due to non-exposed columns Reading Blob Need PEGA0026 - Time to connect to database exceeds limit Acquire DB Connection PEGA0027 - Number of rows exceeds database list limit DB List Rows PEGA0033 - Database query length has exceeded a specified threshold DB Query Length PEGA0034 - The number of declare indexes from a single interaction exceeds a Declare Index threshold PEGA0039 - The size of a BLOB column read exceeds a threshold Blob Size Read PEGA0040 - BLOB size written to the database exceeds a threshold Blob Size Written PEGA0042 - Packaging of database query has exceeded operation time threshold DB Query Time Alerts The highlighted alert codes above are key critical alerts and their threshold values can be altered by creating a DASS instance using the following setting value. prconfig/alerts/database/operationTimeThreshold - sets the alert threshold value for all the highlighted alerts, the default value is 500 milliseconds. prconfig/alerts/database/packagingTime/warnMs — sets the warning threshold value for the operationTimeThreshold value for PEGA0042 only. 
Set this value lower than the operationTimeThreshold so the alert provides a warning if the operationTimeThreshold is in danger of getting exceeded. BLOB and Declare Indexes PEGA0025, PEGA0039 and PEGA0040 alerts are related to the BLOB. The alert message is thrown immediately as soon as the threshold is exceeded, and we need to look at PAL to find out exactly how much BLOB data is read or being written. Again these can be reduced by exposing the frequently accessed properties as columns and also checking whether the application is requesting more information than it really needs. PEGA0034 indicates that the number of declare indexes getting written exceeds the default value of 100 instances. Optimizing pagelist properties enhances reporting performance but we need to make sure we are not impacting the performance by exposing all pagelist properties, which causes a slowdown during update. In addition to this, consult with the DBA for setting LOB (Large OBject) tuning parameters in the database as these differ for each database vendor. Some strategies include setting the chunk size, caching strategy for LOBs, indexing the LOB column and so on. Refer to the related PDN article for more information. Size Alerts PEGA0027 is raised when the threshold value of 25,000 returned rows is exceeded; this may be a bigger number in some applications and the threshold value can be altered by creating a new DASS with the setting value as prconfig/alerts/database/rowReadThreshold. PEGA0033 is disabled by default and should be enabled in the development system to check the query length that gets generated by the system. Investigate the report definition in terms of what feature is being used (functions, joins, filters, formatting data using SQL expression) and look up the query generated using the Tracer. Use Explain plan in the database to tune the SQL query.
Connection Alert PEGA0026 occurs periodically if more users are accessing database at the same time and there are no connections available in the connection pool. Each database operation should not exceed few seconds, if this alert occurs despite the database performing quicker then we need to modify the connection pool setting in application server. The connection pool must be set to 200 and then increase the number if lot of PEGA0026 alerts occur. Other Database Tuning Considerations 1. Do not write queries using Connect SQL explicitly, if writing explicit queries use proper tools to verify the query before uploading to production. If using Oracle generate AWR report for the SQL statement. 2. Pega writes lots of information in logs, change the configuration file to write the log file in a system different than the one where database files are stored. 3. When using reporting in Pega, report statistics are collected by default. Disable this in production system by creating a new DASS Setting for reporting/enablestatistics to reduce the overhead of writing this data. Unused Agents Disabling Unused Agents Pega comes with several standard agents which are in locked rulesets. It is important to review and tune the agent configuration on a production system since there are standard agents that: Are not necessary for most applications as they implement legacy or seldom-used features Should not ever run in production May run at inappropriate times by default Run more frequently than is needed - which might cause issues on a large multimode system By default run on all nodes but should only run on one node maximum This can be done by changing the configuration for these agents, update the agent schedules generated from the agents rule. Let's have a look at some of these standard agents by walking through them by ruleset. Pega-AppDefinition The agents in this ruleset are mainly used for integration with the Project Management Framework (PMF). 
Disable the agents in production and other controlled environments since PMF integration is usually done only in development to track the application development. Pega-AutoTest Disable the agents in production. The agents in this ruleset run test suites periodically and should be enabled in other controlled environments such as QA only if that feature is used. Pega-AESRemote Disable the agents if AES is not used. Pega-EndUserUI Make sure the recurrence time setting does not conflict with when the system is used for the DeleteOrphanTags and PurgeRecentReports agents. Disable the DeleteOrphanTags agent if your application does not use Tags. Pega-Event-Processing Agent SystemEventEvaluation is rarely used in most applications. This should be disabled. Pega-Import-Export The agents in this ruleset support the purge/archive wizard and support certain one-time post-upgrade processing. Disable the agent if the purge/archive feature is not used, which is typically the case. Pega-IntSvcs The agents checkPrintErrors, checkFaxErrors and purgeRequestsTable support the PegaDISTRIBUTION Manager and should be disabled unless that component is installed and used, which is very rarely the case. The ProcessConnectQueue and ProcessServiceQueue agents support asynchronous connectors and services respectively and should be disabled unless the feature is used, which is very rarely the case. Pega-ProcessEngine The frequency for the ProcessFlowDependencies agent may be 'turned down' depending on how the application makes use of the functionality. Pega-ProCom The frequency for the AgentBulkProcessing agent may be 'turned down' for some applications. The frequency for the SendCorr agent may be 'turned down' for some applications. The GenerateStartingFlows agent updates the developer portal to add starting flows for unit testing. Disable this agent in production.
The ServiceLevelEvents agent is used for processing SLAs; this runs every 30 seconds and may be tuned down for some applications. There are three other settings, namely 'slaunitstoprocess', 'slaunitstoretrieve' and 'slarefreshlisteachiteration'. The first two settings help in configuring how many cases are processed and retrieved each time the agent is run; this is set based on the throughput in terms of the number of SLA events being generated during each run (30 seconds or whatever the new value is). The third parameter, namely slarefreshlisteachiteration, is disabled by default but it is useful when multiple nodes are configured to process ServiceLevel Events to avoid contention. See the PDN article on tuning the Pega-ProCom SLA agent for additional information. Pega-RuleRefactoring The agent in this ruleset supports the ruleset maintenance wizards. Disable the agent in production. Pega-RULES Change the SystemCleaner schedule from every 24 hours to a daily off peak time. Change the SystemIndexer agent execution from every 60 seconds to every 600 seconds since rule changes occur rarely in production. The agent should only run on the index-owning node. Change the RuleUsageSnapshot agent schedule from every 24 hours to a daily off peak time. Pega-RulesEngine Change the frequency of the PurgeAssemblyDatabaseCache agent to weekly in production. The ScheduledTaskProcessor agent runs scheduled report definitions and may be 'turned down' depending on the application. The PropertyOptimization agent is used by the Property Optimization tool. Disable this agent in production. Change the frequency from daily to weekly in production for the DeleteUnusedCSSFiles agent. Purging and Archiving Using SQL scripts to archive or purge cases typically offers better performance and scales better than the archive agent. Setup Purging/Archiving Cases are saved in multiple tables; the work items typically get saved to a custom variant of the pc_work table.
There is a history table which saves all audit trail information for the work item. If the work item includes attachments they are saved in a separate table. If the work items are using folders then there are entries in the link tables to link the folder and the work items. There may be additional tables depending on how the application is configured. For example, there might be index tables created to support declare indexes used for reporting. By default all work items save work parties in index tables. The following process is recommended when setting up archival on database level: Create a set of mirror tables within the production PRPC schema which parallels the main PRPC tables and holds the cases which need to be archived (such as PC_WORK_ARCH for archiving entries on PC_WORK). Write a set of database stored procedures that take work from the active tables, place it into the archive tables and remove the archived work from the active tables. When the mirror tables are populated with the archive data, the export of these tables is done by the DBA team by scheduling a regular extract job. Mirror tables are then cleared of data and are ready for the next extract. All types of work objects are part of a parent-child hierarchy. A parent case may have several types of child cases (subcases); the child cases themselves may be parent cases for additional subcases. Archival happens at the top case level only. Child cases are only archived if the top parent meets the archival criteria. Once the SELECT scripts produce the desired results, change them to DELETE. The order of the tables are important, work backwards. Schedule Purging/Archiving A Pega agent can be used to schedule the archival, but a UNIX script (cron job) or any other means of scheduling could also be used if preferred. Retrieve Archived Cases A stored procedure can be used to extract the mirror tables back into the production database when/if requested to restore cases. 
Alternatively a copy system can point to the mirror tables allowing archived cases being accessed through a separate application. DB Maintenance We recommend that you perform the following database maintenance tasks periodically: 1. Perform statistics-gathering on the database, in some cases you might want to perform this task daily. Most Databases support self-tuning that can be automated to run this on a regular basis. 2. Pega recommends setting the initial data file size to 5 GB for the database using rules and then allowing it to grow with automatic extension of data files. Log files should be sized such that log file switching occurs every 20 minutes. Typically this is accomplished by increasing the size of the log files. 3. Rebuild Indexes regularly if required. This can be determined by analyzing the indexes periodically. If there are a large number of PEGA0005 alerts, it might be useful to index all the properties that are used in the WHERE clause. 4. Run Explain Plan on the database periodically if there are a lot of database alerts in the log files. The following tables are small and volatile, and thus vulnerable to fragmentation. Defragment them regularly by re-writing the table and rebuilding the indexes. In addition it may be advisable to cache them in a separate database buffer pool. Share the following list with your DBA for tuning. pr_sys_locks - PRPC keeps records of locks held on cases in this table. This table is a primary table that is subject to fragmentation and it needs to be rebuilt on a regular basis on some database platforms on a regular basis. pr_sys_updatescache - Rule changes are recorded in this table for synchronization to other PRPC server by the System Pulse in a multi-node environment. pr_sys_context - Holds records for passivated requestors. pr_page_store - When a user session times out, the system saves a requestor's entire thread context in this table. 
pc_data_unique_id - Holds the most recent assigned case ID for each ID-format in use. This table is very small (roughly one row per ID-format in use, not one per case); since it is used to get the next unique id for a case, it can get fragmented due to frequent updates of these few rows. pr_sys_*_queues - These tables hold items from the various queues maintained in PRPC, so like the sys locks table these tables are subject to a lot of change and hence churn. Security We already know that it's important to secure an application. We do the due diligence to make sure we set up the correct security. Correct security entails that users are only able to access cases they are allowed to access and only see data they are allowed to see. In this lesson, we'll examine some of the common mistakes that can open up vulnerabilities in the system, and how to address them, including some Best Practices to help us avoid potential vulnerabilities. Then we'll finish up with the use of the Rule Security Analyzer and learn how to integrate this routine check into all of our projects. Common Mistakes that Lead to Security Vulnerabilities We already know that it's a good idea to follow the guardrails. But we might not be aware that the guardrails also protect the security of our applications. PRPC by default has several built-in protections to deal with things such as injection or cross site scripting attacks. When we deviate from the guardrails, we can unknowingly bypass these protections and open ourselves up to vulnerabilities. Common Types of Security Attacks The Open Web Application Security Project (OWASP), which is a non-profit organization focused on software security, has documented a 'top ten' list of the most critical web application security risks. For the sake of this lesson, we'll review their top 10 as of their 2013 findings (the full report can be found on the OWASP website): 1. Injection (SQL, OS, LDAP) As we can see, the most common type of security risk is injection.
Let's take a look at how PRPC combats injection and what we can do to prevent it. Protecting Against Injection Attacks First off, what is an injection attack? Injection vulnerabilities come from providing user entered values directly to an interpreter, such as SQL. Consider the following query: Select pyCreateDateTime from pc_work where pyID = "some user provided value" If the user was allowed to directly provide the input to this query, they could provide something like: "W-1; Truncate pc_work" When this is provided to the interpreter, if it isn't caught, the person could wipe out the entire work table. Thankfully, in most situations PRPC doesn't allow this to happen. Report definitions and other standard rules make use of a prepared value instead of directly inserting user values into the SQL statements. This practice prevents the system from treating the user supplied value as native SQL. So what do we want to watch out for? Use of non-standard practices, such as directly connecting to the database via Java, using one of the RDB methods or using one of the RDB APIs, such as executeRDB, can lead to vulnerability. These approaches should be reserved for the rare cases when it is not possible to achieve the desired outcome using standard rule types, such as report definitions or the relevant Obj methods. Of course if we must absolutely use one of these then we must never allow users to directly provide query parameters. Protecting Against Broken Authentication and Session Management Attacks Broken authentication and session management vulnerabilities result from exposing user account information. Attackers use these flaws to gain valid account information and can then impersonate the account to gain access. Thankfully, PRPC takes measures to prevent this from occurring. PRPC handles this concern by not providing the authentication header after the user has logged in. Plus, PRPC expires old sessions when they time out.
However, sometimes businesses request unreasonably long time outs. For example, a timeout of 24 hours. These should already be avoided for their performance impact, but it also helps during a security audit to be able to confirm user sessions are invalidated after a reasonable time. Protecting Against Cross Site Scripting (XSS) Attacks This is probably one of the most famous types of attacks. Knowledge about protecting against these attacks and improvement in browsers has led to the OWASP downgrading this attack from being the second most critical risk to the third most critical. Cross site scripting is similar to an injection attack, in that a user can provide input that allows code to execute. For example, let's say we have a screen where a user can enter free form text. We then display the text the user enters back to them in a confirmation screen. So, if a user enters something like the following into one of those fields <script>alert("some kind of code")</script> and, if we don't escape the user-entered input, this gets directly entered back into the HTML of the confirm screen, allowing the script to run. Thankfully, PRPC automatically escapes all values, as long as we follow the guardrails. Do not use mode="literal" in a <pega:reference> tag or access the value directly in java with tools.getActiveValue() without passing it through the StringUtils.crossScriptingFilter() function. The easiest way to avoid these is to only use autogenerated UI rules. Of course, if you must use a non-autogenerated rule, always ensure the value has been properly filtered and escaped before displaying it back to the user. Protecting Against Insecure Direct Object References We don't need to worry about protecting ourselves from insecure direct object references. The references to objects are all stored on the clipboard instead of being passed back and forth in the URL. PRPC's model does not provide direct object references, so they can't be considered insecure.
Protecting Against Security Misconfiguration and Sensitive Data Exposures These are squarely on us to manage, but we should already be doing this. We already know that we need to set up the right roles, privileges and access. And we already know that it's not a good practice to reveal sensitive data, such as Social Security Numbers. However, it is a good idea to occasionally review the work of any junior developers, to ensure they're also following these good practices. The Rest of the List We don't need to concern ourselves with the following vulnerabilities since PRPC natively prevents these from occurring based on the way it handles every request or transaction. Missing Function Level Access Control Cross-Site Request Forgery (CSRF) Unvalidated Redirects and Forwards Things like redirects don't impact a PRPC system. But, Using Components with Known Vulnerabilities is a concern to us. However, this is outside the realm of a PRPC domain. If there are flaws found in the current versions of Java, the application server, or other software then we should be working with the administration team to ensure they update this asset to the latest version, thereby patching the security holes. The Rule Security Analyzer As diligent as we are, we just can't check every line of every rule that every one of our junior developers creates. Thankfully PRPC provides a tool to scan all the custom code in the system for known security risks. This tool is called the Rule Security Analyzer. To launch the tool, we select Org & Security -> Tools -> Security -> Rule Security Analyzer. The tool then opens in another window where we can specify which rulesets to scan, which rule types to scan and which regular expression to use for the scan. PRPC by default has several expressions already defined for us to use while searching for various vulnerabilities. After running the expression, we get a report of the number of rules searched, as well as any that have an issue.
From here, we can then identify which rules need to be repaired. Ideally, the original developer who created the risk would be notified and be responsible for correcting their mistake. Fitting into a Project So when is a good time to run the analyzer? Right before a security audit? Just before QA? A best practice is to run the Rule Security Analyzer before locking a ruleset, be it for migration or for any other reason. This allows us to identify and correct issues in rules before they are locked. The tool only takes a couple of minutes to run through the different expressions, and it brings a large peace of mind to know that no security risks are potentially being deployed. Authentication The Basics of Authentication So what is authentication? Authentication is proving to the system that you are who you say you are. This should not be confused with Authorization, which is determining what rights you have in the system. The two may go hand in hand regarding how they are executed, but are two different animals in reality. So, what goes into Authentication? At the very minimum, Authentication requires the use of some kind of user identifier. This UserId is what tells the system who you are. Our authentication could be as simple as: "Hi, I'm Bob" to as complex as: "Hi, I'm Bob, here's the proof that I'm Bob, and here's the word of this other guy that you do trust who says that I'm indeed Bob." Authentication types in PRPC By default, PRPC supports several different authentication schemes. All authentication schemes get classified as one of these types: PRBasic — Standard internal PRPC authentication based on Operator IDs and passwords stored in the system. PRSecuredBasic — Same as PRBasic, but encrypts the credentials using Secure Sockets Layer (SSL) PRExtAssign — Used for external assignments, such as those from Directed Web Access J2EEContext — Used for container managed authentication PRCustom — All other authentication schemes.
These are mostly used with Single Sign-On processes. We won't get into PRBasic, PRSecuredBasic or PRExtAssign in this lesson. Typically we just use these authentication types as-is without any need for additional configuration. J2EEContext will be covered later in this lesson, when we discuss Container Managed Authentication. So let's take a look at PRCustom. PRCustom is the catch-all for any other authentication scheme. This includes the various Single Sign-On approaches, which are discussed in the 'Authentication using SSO' lesson, LDAP authentication, which was covered in the Senior System Architect course, and any other in-house authentication that we might encounter. The web.xml file So, how do we specify which authentication scheme we want to use? The system can support multiple authentications all at once by mapping different authentications to different servlets fronting the PRPC system. These mappings are done in the web.xml file. The default web.xml that is shipped with a fresh install of PRPC is available in the related content for you to download. This file contains two important features. The definition of the Servlet itself. And, the mapping of the servlet to a URL: Note that the servlet gets mapped twice both to the root URL "/PRServletProtected" and to the children of that URL "/PRServletProtected/*". When specifying new mappings, we want to ensure we follow this same pattern to properly map both the parent and children. The servlet definition consists of four values, and then a series of parameters, identified as 'init-param'. All servlets will have a: servlet-name display-name description servlet-class The 'init-param' values vary depending on which type of authentication we are leveraging. The starter web.xml provides several samples we can use to get started with most servlets. One of these parameters is the AuthenticationType. This is where we specify our choice of PRBasic, J2EEContext, PRCustom and so forth.
If this parameter is omitted the system uses PRBasic as the default. When specifying PRCustom, we need to also supply another parameter of AuthService. So, what is an AuthService? The AuthService is short for Authentication Service. This is where we provide the name of a Data-Admin-AuthService, which is a record we create in the system. This tells the system which particular custom authentication scheme, such as single sign-on or LDAP, to leverage. Auth Service To create an Authentication Service, we click the Designer Studio button and navigate to Org & Security > Authentication > Create Authentication Service. This launches the new form for our Authentication Service. On this form we have the choice of creating a service for either SAML 2.0 or a Custom authentication, the short description and the name of the authentication service. Since SAML (Security Assertion Markup Language) is leveraged for single sign-on, we'll cover that particular authentication service in the single sign-on lesson. Let's choose Custom and proceed with our creation. A custom Auth Service allows us to specify the Authentication Activities: These activities are where we provide all the necessary logic for our authentication. Thankfully, PRPC ships with several ready-made activities we can use as starting points for some of the more common authentication schemes. Not shown here, the system also allows us to specify the JNDI Binding Parameters and the Search Parameters. These are typically leveraged just for an LDAP authentication. The Mapping tab is also leveraged only for an LDAP authentication, and is covered in the External Authorization lesson. On the Custom tab, we have several options to control the behavior of our authentication. The first is whether or not to use SSL. 
If we elect to use SSL, and someone accesses the system via a non-secure protocol, such as HTTP, instead of HTTPS, the system will then use the HTML rule defined in the Initial Challenge Stream to redirect to the secured URL. This rule must reside in @baseclass class. The standard HTML rule Web-Login-SecuredBasic is used as the default if one is not specified. We can use this rule as a starting point for any customizations we might need to make to the system when using this feature The next option is used to provide a popup for gathering credentials instead of a standard login screen. The credential challenge stream is used to specify the HTML rule to use for this popup. As above, it must reside in @baseclass. The timeout options allow us to configure how the system behaves during a timeout. The Use PegaRULES Timeout option lets us choose between the timeout specified in the user's access group, or to let the application server handle the timeout. Use Basic Authentication is similar to the one for Challenge in that it instructs the system to use a popup instead of redirecting to a login page. Timeout challenge stream is the same as above, an HTML rule in @baseclass is used to gather credentials. The redirect URL is often used in single signon or container managed situations, where the user first authenticates against an external system. This is used to direct them back to that initial system. The last two options are used to display an alternative login screen when the user fails to authenticate and to choose which types of operators can login through this service. The source of operator credentials doesn't actually look for their credentials. Instead, it merely relates to the "Use external authentication" checkbox on the Security tab of an Operator record. If "Use externally stored credentials" is selected, then only operators that have the checkbox enabled on their record can login. 
If "Use Credentials Stored in PegaRULES" is selected, then operators need to have that box unchecked. Now that we have an authentication service defined, we just need to make sure it's referenced in our servlet definition in the web.xml. Container Managed Authentication Container Managed Authentication is based on the Java Authentication and Authorization Service, typically called JAAS for short. This is a pluggable API that allows different sets of login modules to be used for each application. Typically, a system using JAAS also integrates with an LDAP server to store the user's credentials. The lesson does not cover JAAS concepts, or how to configure the application server to use it as every application server is a little different. It is important that you understand the JAAS concepts specific to your environment so that you can understand how PRPC works with JAAS. What we will concentrate on is how to configure PRPC to interact with JAAS. Let's assume for the moment that application server and LDAP entries have already been set up. Within PRPC, the first thing we need to do is to set up a servlet that uses JAAS. The standard web.xml that ships with the system already has one of these set up. Look in the web.xml file for a Servlet named "WebStandardContainerAuth". This can either be cloned to a new servlet or just used as-is. This servlet is mapped to the url-pattern '/PRServletContainerAuth'. If we were creating a new servlet we would need to clone these, but for the sake of this lesson let's just use them as-is. When using Container Managed Authentication, we still need to have an operator in the system. This is where the "EstablishOperatorExternally" activity comes in. This is an extension point activity, meaning the one provided is blank. We can override it to perform any necessary update on our operator, such as pulling in updated information from an LDAP connection or creating a new operator when one doesn't exist.
When leveraging an LDAP system, it is typical for this activity to access the LDAP system again, not to authenticate the user, but to pull any additional, updated details. The important thing to note is that the step page of this activity is the intended operator page. Hence, any edits, updates or creations need to be merged into the current step page using a Page-Merge-Into. The PDN has examples of this in the PegaSample-IntSvcs ruleset available in the Integration Services and Connectors article () from the Integration topic. Though written for version 5.x, the samples still apply to version 7.x. Security Policies Security policies can be used to enforce such things as minimum password lengths or whether or not the system should use a CAPTCHA (Completely Automated Public Turing test to tell Computers and Humans Apart). They are disabled by default. To enable them, first navigate to the Security Polices landing page by clicking Designer Studio > Org & Security > Authentication > Security Policies. The landing page can also be accessed from Designer Studio > System > Settings > Security Policies. Both navigations open the same landing page so it doesn't matter which path we use to access it. Once there, when we click the Enable Security Policies button, we're reminded that these policies will take effect for the entire system and will affect all current operators. If we accept, the system opens up the policy settings for us to configure. These policies can be considered as three different functions. The first relates to the user's passwords. Within these settings we can enforce a minimum operator ID length, a minimum password length, the number of numeric, alphabetic and special characters required. All of these can be set to a maximum value of 64, but please be realistic. 64 character long passwords are overly difficult for most people to remember. 
The last two settings allow us to specify how many unique passwords need to pass before an operator can reuse an old one and the number of days before the operator must change their password. Both of these can be set to a maximum value of 128. The second relates to the use of CAPTCHA. A default CAPTCHA implementation leverages the CAPTCHA shipped with PRPC. To use a custom CAPTCHA, first review the PDN article Customizing CAPTCHA presentation and function (). Enabling the CAPTCHA Reverse Turing test module allows the system to present a CAPTCHA upon an authentication failure, based on the probability specified in the next setting. When disabled, no CAPTCHA displays on a failure. The last setting, enable presentation of CAPTCHA upon initial login, tells the system to display the CAPTCHA if it is the first time somebody tries to access the system from a new computer. And the third is for Authentication lockout penalties. When enabled, if the user fails to login for the set number of times, a lockout penalty is imposed before they can login again. These penalties compound. For example, using the settings below, if a user has failed to login 5 times they must wait 8 seconds before they can try to login again. If they fail a 6th time, then they must wait 16 seconds (8 + 8) and if they fail a 7th time then they must wait 24 seconds (16+8). Note that the last one of these "Audit log level" applies to the lockout, and all the logins. It can be set to one of three values: None - No log entries are written Basic - Only logs failed login attempts Advanced - Logs all logins, both failed and successful These logs are available through the "Display Audit Log" button at the top of this landing page. These logs are written instances of the Log-SecurityAudit. If required, additional custom reports can be written against these instances. The "View History" button allows us to see an audit trail of all the changes to these settings.
SSO Authentication Single Sign-On (or, SSO) makes it possible to login only once - typically outside PRPC - thereby gaining access to multiple applications, including those built on PRPC. There are multiple ways to drive SSO; we will cover only a subset of these in this lesson. Windows-based authentication incorporates a mechanism whereby logging into a PC provides the credentials for logging into other applications. This involves SPNEGO (Simple and Protected Generic Security Services Application Programming Interface Negotiation Mechanism) and Windows integrated authentication. It is also possible to use 3rd party desktop applications, such as SiteMinder, to drive SSO authentication. These applications can "screen scrape" to essentially push credentials through the login screen, or use "token security" to push out a token that is subsequently verified by PRPC. It's also possible to drive single sign-on through a customer website. With this technique, there can be a link from the website to the PRPC application, or PRPC can be embedded using Internet Application Composer (IAC). The association from the external website to PRPC can be configured to require token validation, if desired. Other techniques, such as IP address control, can be used to ensure a "trust" between the external website and PRPC. Single sign-on and the General Design Pattern Let's take a quick look at SSO in action to provide context for this lesson. This is an example of an external website redirecting to PRPC. This is a demonstration website - hence it has a somewhat minimalist design. First, we login. Once this is complete, the website displays a link to our PRPC application. Imagine several links here: some pointing to PRPC applications, others pointing elsewhere. The idea here is that the website handles the authentication, and then opens the door to other applications. We click the link... and we're in.
There is a lot that went on behind the scenes in the few milliseconds after clicking the PRPC link, including token verification. So, let's take a look at this now. Let's start by looking at the basic design pattern of how authentication is handled. This framework extends to all types of custom authentication, not only including SSO, but LDAP as well. Custom authentication is identified in the Servlet Descriptor - the web.xml file. In the web.xml file, the AuthenticationType parameter must be set to "PRCustom" and the "AuthService" parameter must be set to point to an Authentication Service instance in the PRPC system. Below is the authentication service instance that this particular servlet points to. The Authentication Service instance points to authentication activity and the authentication activity handles the actual business logic that is required post-authentication. Authentication may involve token verification, or perhaps dynamically creating an operator instance if one does not exist yet. Once the activity is complete, the user is logged in. Because the business logic is entirely handled by the authentication activity, let's take a look at those first. Technically speaking, there is really only one absolute requirement for an authentication activity; it must identify an operator in the system so that PRPC can change context to that operator. Before the activity runs, the application context is essentially anonymous, treating all users the same. In addition to establishing an operator, authentication activities can also do the following: Create a new operator instance for the user logging in when one does not currently exist. Authenticate. Please do not be thrown by this. Authentication activities only authenticate in certain circumstances, for example with LDAP integration. They do not authenticate with SSO since, in this case, authentication is handled by an external system or website.. 
Let's take a look at an example authentication activity that was used for our SSO example. In this activity, the system validates that the SSO request comes from a known system, by comparing the Application ID provided against one that's stored in the system. If that check passes, it then places a call to VerifySecurityToken to make sure the token is legitimate. We'll get back to that process in a little bit. Provided these checks pass, the system opens the operator's record, and subsequently passes the page with the Operator Record back in the pyOperPage parameter. This parameter is an 'OUT' parameter of type Java Object; it is critical that we perform this step. The code that runs this activity reads the page from this parameter and subsequently establishes the session in the context of that operator. We don't need to worry about setting this context; it is entirely done by PRPC, provided that we properly set the pyOperPage parameter. The other out parameter, pyChallenge, is used for an alternative case, which we will discuss shortly. It's also important to note that we need to make sure the 'Require authentication to run' option is not selected on the Security tab. This is because we need to run this activity before the user is authenticated. That's it! We have just covered the essential requirements of an authentication activity: create an operator page and pass it into the pyOperPage parameter, and uncheck the "Authenticate?" option. The pyChallenge Parameter and How to Set It Recall the pyChallenge parameter discussed above. There are two possible outcomes for a given run of an authentication activity. The first is that the activity successfully identifies an operator, and the second outcome is whether or not PRPC subsequently associates the session with the operator so that he may go about his work. Perhaps an error was thrown when the token was tested, or perhaps reauthentication is required because the session timed out.
As we stated earlier, in the case of success, set the pyOperPage parameter to establish the operator for the session. On the other hand, if the session should not continue, set the pyChallenge parameter. This determines what happens after the activity is complete. It determines what is rendered on the screen. pyChallenge should be set to one of the following constant field values, all in the PRAuthentication interface: DEFAULT_CHALLENGE GENERATED_CHALLENGE_STREAM DEFAULT_REDIRECT_URL GENERATED_REDIRECT_URL DEFAULT_FAIL_STREAM Many of these render the PRPC interface based on a setting in the "Custom" tab of the corresponding Authentication Service Instance. Setting values in the "Custom" tab of an authentication service instance will, on its own, do nothing. This is only a repository for settings. Setting the pyChallenge parameter in the authentication activity is what truly drives which behavior is executed. If pyChallenge is set to "PRAuthentication.DEFAULT_CHALLENGE", and it is an initial authentication rather than a timeout re-authentication, PRPC uses the standard PRPC authentication, or a customized HTML Stream, depending on how the "challenge options" are set in the authentication service instance. On the other hand, if this is a timeout scenario, the settings in "Timeout Options" are used instead. Please note that the "Challenge Options" are rarely used for SSO, since the authentication is typically started from an external system. These settings are used, however, with LDAP integration. If pyChallenge is set to "PRAuthentication.GENERATED_CHALLENGE_STREAM", the activity itself displays HTML; the authentication service instance is ignored. If pyChallenge is set to "PRAuthentication.DEFAULT_REDIRECT_URL", PRPC redirects to an external website. This is relevant for timeouts only. If an external website is used for the initial authentication, we probably want to reauthenticate using the same site.
"PRAuthentication.GENERATED_REDIRECT_URL" also redirects to a URL, but does so based on the URL set in the pyRedirectTo parameter. Finally, setting pyChallenge to "PRAuthentication.DEFAULT_FAIL_STREAM" will render the HTML configured in the "Authentication Fail Stream" setting in the authentication service instance. Reading Parameters for Authentication Activities Let's take a closer look at our example web application. When we enter our username and password and click the link, the external website populates a URL Query String with UserId, UserName, and some other information. For example, the query string might look like ?UserId=jsmith&UserName=John+Smith. When the authentication activity is executed, PRPC automatically populates these query parameters as activity parameters. So, we should be able to leverage these parameters throughout the activity. This is known as reading user credentials from the URL query string. Alternatively the information could be passed in as custom HTTP headers. As stated earlier, PRPC automatically populates activity parameters with values from the query string. This is not the case with HTTP headers. So, we need to do an API call to look up these custom headers. Specifically, we need to pull a value from the pxRequestor.pxHTTPServletRequest property. The pxHTTPServletRequest is a façade object for the HttpServletRequest java object. As such, it implements the javax.servlet.http.HttpServletRequest interface. Once a user has gained access to PRPC, it is no longer possible to access the pxHTTPServletRequest property; as such it can only be queried from an authentication activity. To retrieve a header value, use the built-in @java function as shown here: @java("((javax.servlet.http.HttpServletRequest)tools.getRequestor().getRequestorPage().getObject(\"pxHTTPServletRequest\")).getHeader(\"UserId\")") Note that this is an example of pulling a "UserId" custom header. Replace "UserId" as appropriate for each of the custom headers we need to read.
Organizing Custom Authentication Rulesets When writing authentication activities, give careful consideration to the ruleset into which the rules are saved. The authentication activity is called shortly after authentication begins, prior to the requestor page being associated to the user who is attempting to log in. This makes sense, since it is the activity itself that establishes this operator. Ordinarily, the operator dictates the access group, which dictates the application, which dictates the ruleset, which in turn houses rules like our authentication activities. A "chicken and egg" problem perhaps? That is, how are rules called before PRPC knows the operator, the Application and its corresponding ruleset stack? The answer is the use of a "Requestor Type" instance — specifically a "Browser Requestor Type". It is this instance that is used to point to the access group, rather than an operator. In fact, there are other rules that have these same "pre-operator" characteristics. Consider the user interface rules that are used to render the login screen when SSO is not used. When a login page is shown, a requestor page is created, and a ruleset stack is assembled based on the access group in the browser requestor type instance. This ruleset stack continues to be used until some point after the authentication activity is run and the operator is established. The conclusion here is that the custom authentication activity should be saved into a ruleset that is dedicated for rules called before authentication. Do not mix it with process and business rules related to your application. Depending on what ruleset is used, update the "Browser" requestor type, and the Access Group it points to accordingly. Also, add this ruleset to the main application ruleset stack. This ensures that the timeout activity can be found when the application ruleset stack is active. A final note, the Applies-To class of the authentication activity should be "Code-Security".
Let's take a closer look at how token verification works. In the previous example, we covered how our authentication activity provided two steps prior to locating the operator record. One was to validate against an Application ID, the other validated the token. Here is the activity we showed before. If possible, have the external application name the parameters as described here, making it easier to leverage standard authentication and token verification activities. Looking back at our activity, we can see that the first step is to open the Application ID instance, which identifies the password and lag time that is used for token verification. As discussed, this is identified by the "From" parameter contained within the URL query string. Note that the application ID instance is opened into the "AppIDPage" page. Please also use this very same page name, as it is expected by the standard token verification activity, which is called next. Note that the "VerifySecurityToken-GMT" activity has the current parameter page passed into it. That way, it can readily consume the parameters that come from the URL. If we construct the token as shown in this lesson, it should be possible to call this standard activity. If not, create one based on it. The step has a transition to check if the token verification succeeded. If the "errorMessage" parameter is empty, the activity continues. If not, it advances to the error handling step. The error handling step involves setting the "pyChallenge" parameter and then displaying the error message with HTML. Let's take a look at how PRPC has improved SAML integration in Pega 7. If you recall the lesson on Authentication Services, you should remember that we could create either a SAML Authentication Service, or a custom one. This time, let's create a SAML Authentication Service. The SAML Auth Service provides the ability to state whether or not to use SAML authentication, and to provide the Identity Provider (IdP) information.
To save time, we can also import the IdP information using the "Import IdP metadata" link. This opens a popup where we can either choose to get the information from a URL or a File. Similar to the Identity Provider, the form gives us the ability to configure the Service Provider settings, as well as a link to download the SP metadata. The last part of the form is where we specify the timeout and authentication activities to use with this SSO connection. The default activities for use with SAML are shown here: These default activities work similarly to the rest of the authentication activities in that their goal is to identify an operator, and then use that operator to set the correct context for the user session. External Authentication In the other security lessons, we cover how to access information from an external source, whether that comes from an LDAP connection or from a Single Sign-On process. In this lesson, we're going to take a look at how we can use this information to create or modify our operators. Operator on The Fly This process is known as Operator on the Fly, and is built into several sample Authentication Activities already. In an authentication activity, we need to open an operator record. If one doesn't exist, we can create a new one. The new operator is based on a model operator, and has some of its attributes updated. We'll get back to that in a little bit. In either case, whether the operator was created, or if an existing one is found, we finish up with updating some of the operator's properties. In the sample activities, this is typically just the user's name. But we can expand this to update any of the information available to us from the external system. For instance, we might need to change the operator's OrgUnit, or perhaps their phone number. In the later parts of this lesson, we'll be using this feature to also update their authorization.
Mapping Attributes from LDAP When we're using an LDAP authentication, we can leverage built in functionality to set these properties for us. When used with the standard AuthenticationLDAP activity, we can specify the mapping of LDAP attributes to an operator on the Mapping tab of the Authentication Service record. To do this, we open the Authentication Service record, go to the mapping tab and then specify a relation between LDAP attributes on the left and properties of a Data-Admin-Operator-ID on the right. In this example, we showed how we would map the operator's Organization and OrgUnit. For other authentication schemes, since we would not need to process the LDAP attributes, it is probably easier to just create a simple Data-Transform to handle any mappings. Using a Model Operator to Create a New Operator So how about creating a new operator? Most likely we won't have all the necessary information about an operator passed to us during the authentication process so instead we need to get it from another source. This is where the concept of a model operator comes in. The model operator is an operator record that doesn't reflect a real user. Or at least it shouldn't, but it's important to note that since the model operator is an operator record, somebody can log in as this operator. Therefore the model operator should always be granted the lowest authorizations by default. We'll override those and provide the correct authorizations when we map them from the external source. To establish a model operator, we first need to create an operator record. This record can be as generic or detailed as needed. Obviously the more details provided here, the less that needs to come from the external source, but it still needs to be generic enough to apply to all operators. The same is also true of properties on the Work tab.
These should be specific enough to provide the most detail, while still remaining generic enough to apply to all operators. It's important to note that we need to make sure we set the correct Organizational unit for this operator as we'll be creating one per org unit that will be logging in. A common practice, though not a specified best practice, is to uncheck the 'Operator is available to receive work' property. This prevents any work accidentally getting assigned to this operator, especially if a work routing practice like round robin or load balancing is used. The next step is to define the model user in the Organization Unit record. We use this in our activity to determine which operator record to open as the model. Back in our authentication activity, we'll use this model operator as the starting point for a new operator. During authentication, if an operator record is not found we would: Identify which Org Unit record to use, based on a parameter passed. For example, something like a param.deptNumber could be used to identify the Org Unit based on a Cost Center's Number specified in the record. Open the Org Unit and retrieve the ID of the model user. Using this ID, open the operator record we created. Update the properties on the operator. Since later in the authentication activity we'll be updating the properties from our external source, the only two properties we'll need to worry about here are: o pyUserIdentifier - since this is the key of the operator record it is not typically updated in the later step o pyOpAvailable - if the common practice of disabling the 'Operator is available to receive work' property is being used, this property is set to 'false'. We will need to set it to 'true' for this user to be able to perform any work Based on the business' auditing requirements, some additional properties, such as the '.pxCreateDateTime' may also need to be updated. That's it.
We can now let this new operator record flow through the remaining process of updating their information from the external system. Access Group Authorizations By now you should already know that Access Groups drive the rights and privileges a user receives. They specify which applications a user can access, which access roles they are granted, the portals they are granted and a host of other information. This makes it perfect for mapping external authorizations, provided we can classify our users as either having, or not having, these rights. There are two approaches to this method. If the external system is capable, the best approach is to directly state the Access Group in the external system. This way, we can leverage direct mapping in the PRPC system. It's important to note that this needs to be set exactly the same in both systems. For example, let's take a look at the access group 'HRServices:Managers'. To be able to specify this access group for a user, the external system needs to provide this value. Then, in our mappings, it's as simple as setting the operator's access group as shown here: However, we don't always have control over what information we'll receive from the external source. In those cases, which frankly is the more likely case, we need to use an alternative approach, by looking up the Access Group. We first create a lookup to relate the values we expect from the external system with an access group. Here is a simple example that leverages a Decision Table: Alternatively this could have been accomplished with a decision tree, a map value, a look up against a custom data table, or any other method. But I like to use a decision table when possible because it provides a clean interface to the user. In this particular example, we either provide the manager's access group, if the LDAP group returns that they're a manager, or we default to the user access group. In reality, there are often more than just two choices.
Back in our data transform, we then just need to use a function call to evaluate the decision table. Access When Authorizations The Access Group method works great as long as all the users receive the same rights. But what do we do when we need to deal with granular rights? For example, let's say an operator 'Fred' has been put on probation. While on probation, Fred can only work on the Candidates that he's currently assigned. We could set up a different access group, different access roles, etc... and then create mapping rules to switch Fred's access group, but that seems like a lot of work for a temporary situation like probation. Instead, we can leverage Access When rules to control this. Using an Access When, we can keep the same generic access group for all the operators, and conditionally control Fred's access. First, we need a way to store the fact that Fred's on probation. We could use an existing property on the Operator record, but for this lesson, let's create a new one. That's right. We can extend the Operator records with our own properties! Let's create one now. Here we have a new true/false property called "OnProbation" defined on the operator. Now, we need to make sure it's mapped from our external attributes. In this example, we were using an LDAP authentication, so it's just a matter of adding it to the mapping tab of our authentication service. But we could have just as easily integrated this with any of our other mapping approaches. Now that the property is available for operators, we can define a new Access When rule that looks at this property. This Access When rule evaluates if the person is on probation, and if true, also ensures the assignment belongs to them. The final step is to update the Access Manager to use the Access When rule we created. Once we switch to the right Access Group, we can change the Open and Modify rights from Full Access to Conditional and then specify our new Access When rule. That's it.
Now whenever Fred, or anybody else who's on probation, logs in they will only be able to access the Candidates that they currently have assigned to them. Once their probation is lifted, and the corresponding flag is changed in the external system, they'll once again have the full access they had originally. Custom Authorizations These two approaches of course aren't the only ways to centralize the authorization model. Some clients have custom in-house systems they use instead to accomplish these tasks. Or they might need their authorizations to act against a scale. For example, a client might have a threshold of approved loan amounts. Below that threshold an operator can approve the loan themselves but over the threshold they must seek a manager's approval. This kind of business logic is often built into the process rather the various Access rules, but it is still an example of an external authorization. In these cases we follow the same kind of approach. We get the value from the external system, we store it against the user session, preferably attached to the Operator record directly, and then we evaluate it when necessary. Encryption User Passwords PRPC automatically encrypts the passwords for all operators in the system. If a user's operator record is accessed, all that you'll see is an indecipherable string, like this: "{pr}+/SpNl3dVR3TQnaD4Fxufp4XugBj5f2qdCp4cENaj7Ovt35p3GifDqa5op8Do+vV" This encryption protects any user that is internally authenticated. If we're using an external authentication, it is up to the external system to protect the user passwords. Ruleset Passwords Similar to the user passwords, PRPC automatically encrypts the passwords for locked rulesets. This prevents developers from unlocking earlier versions and making changes to existing rules. Stored Data Data that's stored in the PRPC database is not encrypted. 
The data in the BLOB columns is stored using a compression algorithm that renders the data unreadable for standard DB Queries, but that is not the same as encryption. Any PRPC system would be able to uncompress the data back to a readable form. To provide some level of protection, PRPC offers the ability to encrypt data as a Password, as a TextEncrypted property or the entire BLOB. Properties that are encrypted as a Password use PRPC's existing encryption algorithms that are used for user and Ruleset passwords. A developer doesn't need any additional configuration and can use this feature out of the box. The other two options, TextEncrypted and BLOB encryption require us to create a site-specific cipher. This functionality relies on Java Cryptography Extension (JCE) technology built into the PRPC system. We'll get back to that later. Passwords in command line tools If we're using any of the command line tools, such as for migration, these tools often require access to either the PRPC system or the Database. The passwords used for these are stored in either the prbootstrap.properties or prconfig.xml files, depending on the tool. These files use clear text for these passwords normally, but they can be encrypted with some configuration. We can generate passwords for use in the prbootstrap.properties file using a built in cipher in the PRPC system, but the passwords for use in the prconfig.xml rely on first implementing the site-specific cipher referred to above. Site Cipher To implement many of these encryptions, we need to first implement our own cipher. By using our own cipher, we can ensure that we're the only ones with the proper keys to un-encrypt any data that we've encrypted. Running the scripts Before we can get started though, we need to review how to run the scripts we'll be using for creating this cipher. The script we need to use is called 'runPega'. 
This script is located in the scripts directory of the PRPC installation media and is available as either runPega.bat or runPega.sh. In either case, the arguments are the same. To run the script, we need to specify: --driver: --prweb: --propfile: Java-class: Args: Path to the JDBC driver Path to directory used to run this instance Path to a prbootstrap.properties file Name of a class to execute Any arguments that need to be provided to the class. To simplify executing this script, it is recommended to create a temporary directory with all the necessary files. The name of this directory doesn't matter, but let's refer to it as './OurTempPega' for the rest of this example. Within this directory, we require a 'WEB-INF' directory. And within the WEB-INF we require a 'lib' and a 'classes' directory. So, after all of our creations we should have: ./OurTempPega/WEB-INF ./OurTempPega/WEB-INF/lib ./OurTempPega/WEB-INF/classes Within the lib directory, we need to copy these files: jsr94-1.0.jar prbootstrap.jar prdbcp.jar And the corresponding jar files for our JDBC connection Within the classes directory, we need to copy these files: prbootstrap.properties prlogging.xml prconfig.xml Once this is all done, we can execute the script by specifying: 554 ./runPega.sh -driver ./OurTempPega/WEB-INF/lib/<jdbc driver>.jar -prweb ./OurTempPega/WEB-INF - propfile ./OurTempPega/WEB-INF/classes/prbootstrap.properties <java class> <arguments> Throughout the rest of this lesson, instead of repeating these arguments every time we'll just refer to: runPega <java class> <arguments> However be sure to include the arguments when actually entering them on the command line. Creating the Cipher Creating the Cipher The first step to creating our own cipher, is to determine which ciphers are available on the system. We achieve this by running the script with the JCECapabilites class and an argument of none. 
runPega com.pega.pegarules.exec.internal.util.crypto.JCECapabilities none This will provide a wealth of information, but we're only interested in the providers, ciphers and key generators. We need to pick a cipher that is listed in both the cipher and keygenerator outputs. [Cipher.png] [KeyGenerator.png] Based on the lists provided in our sample system, as shown here, we can choose from among: AES ARCFOUR 555
Similar to runPega, this script takes the three parameters of: --driver the path to the jdbc driver for our database 556 --prweb the path to the our temporary directory --propfile the path to the prbootstrap.properties file And the additional parameters of: --basedir --jarfile --codeset the path to the directory with our class a name for the jar file that will be created to hold our class either one of: o Pega-EngineCode to put it in the base pega codeset. This is considered best practice but it would need to be manually migrated to any new version of PRPC. o Customer to put it in the customer codeset. This doesn't need to be manually migrated to a new version of PRPC. --codesetversion the latest version of the codeset. If using the customer codeset the version is always 06-01-01 -prprivate this is the full package path to the cipher class we've just created. This must be in quotes. Note that this parameter only uses a single '-' instead of the double '—' used in all the other parameters. So, if we put this together with our sample directories, the command would look like: ./compileAndLoad.sh -driver ./OurTempPega/WEB-INF/lib/<jdbc driver>.jar -prweb ./OurTempPega/WEB-INF -propfile ./OurTempPega/WEB-INF/classes/prbootstrap.properties -basedir ./OurTempPega -jarfile <nameofjar> --codeset Pega-EngineCode -codesetversion 07-10-09 -prprivate "<pathtoclass>" And after running the command, we should see an output similar to this: The last step is to update the system to use the cipher we just created. This is done in the prconfig.xml file. <env name="crypto/sitecipherclass" value="<the full name of our class>" So, if we used the same class from our sample: <env name="com.pega.pegarules.exec.internal.util.crypto.testCipherClass"> After updating the prconfig.xml, we'll need to restart the server so the changes can take effect. Now that we've created the cipher and loaded it into the rulebase, we need to start implementing it throughout the various tools we use. 
Encrypting prbootstrap.properties The first one we'll address is the database password in prbootstrap.properties. This password is currently stored in clear text. To create the encrypted password, we need to use a standalone class called PassGen. This class encrypts a password using a PBEWithMD5AndAES cipher, which is different than our site specific cipher. This is because our site specific cipher is stored in the database so the system needs a standardized way to access our cipher that is still protected. To generate the password, we again use the runPega command: runPega com.pega.pegarules.pub.PassGen password After running the command, we should see an output similar to this: We then take this password and replace the clear text password in the prbootstrap.properties file with the one shown here. The password for the database is now encrypted. Encrypting prconfig.xml Some of the command line tools rely on prconfig.xml instead of the prbootstrap.properties file. To encrypt these connections, we can create a JCE Keyring file to hold the passwords. This file is created or updated using the KeyringImpl class. This class takes three parameters: The path to the keyring file and the keyring file name. The name must match the name given for the database in the prconfig.xml file. I.E. pegarules The path to the prconfig.xml file The path to our running directory Again, we execute this class using the runPega command: runPega com.pega.pegarules.exec.internal.util.crypto.KeyringImpl ./OurTempPega/WEB- INF/classes/<keyring file> ./OurTempPega/WEB-INF/classes/prconfig.xml ./OurTempPega Note that this class doesn't use parameter keywords, so the order is important. Once we execute this command, we receive a series of prompts to step us through the process: 1. Provide the password to update the keyring file. If this is an existing keyring then the password must match the one given at this step during creation. 2. 
Confirm the clear text values for the database url, database user name and database user password that is read from the prconfig.xml file. 3. We're then prompted to provide the password to encrypt. Accepting the default and pressing enter will work in most cases, as this will be the password that was already read from prconfig.xml. 4. Optionally, we could have entered REMOVE which will remove this entry from the keyring. 558 5. After this step, we can remove the clear text password from the prconfig.xml file. The system will now recognize that there is no password provided in the prconfig.xml file and then look for the corresponding pegarules.keyring file to retrieve the password. Using the keyring in a command line When running from a command line, we can explicitly state the keyring to use by providing the parameter -Dpegarules.keyring and full path to the keyring file. In our sample, the parameter will look like this: -Dpegarules.keyring= Additional steps for BIX If we're encrypting these values to use with a BIX implementation, then there are a few extra steps we need to take to also encrypt the PRPC user and password BIX uses to access the system. We follow the same steps as we did for encrypting the database password, but we include a fourth parameter of bix. Using our same sample, the command would look like: runPega com.pega.pegarules.exec.internal.util.crypto.KeyringImpl ./OurTempPega/WEB- INF/classes/<keyring file> ./OurTempPega/WEB-INF/classes/prconfig.xml ./OurTempPega bix This tells the system we're encrypting for BIX and triggers some additional prompts. 1. Enter bix username: 2. Enter bix password: We provide the username and password when prompted then click enter to complete the process. The last step is to update the BIX command line to use the keyring using the same -Dpegarules.keyring parameter we've covered. Effective Date Optional. Enter an effective date for the rules in this ruleset version to support historical processing. 
Use the format YYYYMMDD or click the calendar icon () to choose a date. This value does not affect normal system operation; rules in this ruleset version are available before and after the date. If you leave this field blank and save the Version form, the system enters in this field the date that this version rule was created. The Effective Starting value affects processing only for requestor sessions that execute the PublicAPI method setRuleSets() to establish a cutoff date for rulesets. This function revises the requestor session ruleset list to exclude rulesets with an effective date later than a supplied date. To negate or turn off historical processing, call restoreRuleSetList().Your activity can call setRuleSets() multiple times with successively earlier dates, but you can't advance the date. So, to turn on 2005 processing after previously turning on 2003 processing, return to the current date with restoreRuleSetList() first. Such historical processing allows only "as-was" rules to apply (when this field is completed accurately and the setRuleSets() capability is enabled). For example, tax processing performed in 2005 may apply the tax rules in force as of January 1, 2003, if properly configured. NoteDon't enter a future date in this field. A future date does not prevent the rules in this ruleset version from being found by rule resolution. ;
https://quizlet.com/93876874/pega-7-training-flash-cards/
CC-MAIN-2019-35
refinedweb
71,629
53
Have you ever played a game that used a cooldown system that caused you to wait a certain amount of time before being able to attack, or use a spell? Game designers use this technique to create balance in the game, otherwise it will be too easy and there would be no challenge (how boring). It turns out we're in the same situation with our player's projectile attack. There's nothing in our code to stop the player from shooting a projectile as quickly as they can hit the spacebar. Yeah, we need to fix that. In this article, I'll cover just one way to make a cooldown system. Time. Time. Who has the time? My approach is to create two variables; one that defines the interval the player has to wait until shooting (I'll add the SerializeField attribute), and another to store the future time in seconds to wait, which is a sum of the current time and the wait interval after the player fires. Let's create the new variables in the Player class. public class Player : MonoBehaviour { ... [SerializeField] float fireCooldownInterval = 0.5f; float fireCooldownDelay = -1; .... } I set the interval to 0.5 as a place to start. The delay is set to -1 initially to ensure the first shot happens without a delay. Tracking Time Elapsed In our fire condition, we'll only instantiate a projectile if the current time is greater than our delay time. Once the player shoots a projectile, we then want to assign the current time in seconds plus the interval to our delay. void RespondToFire() { if (Input.GetKeyDown(KeyCode.Space) && Time.time > fireCooldownDelay) { Instantiate(projectilePrefab, projectileSpawnPoint.position, Quaternion.identity); fireCooldownDelay = Time.time + fireCooldownInterval; } } I'll set the delay to one second so it's easy to see the effect. Now when I try to spam projectiles with the spacebar, I can only fire every second. Summary We now have an attack cooldown mechanic that can easily be modified by anybody on the team with no coding required. 
This is a pattern we can reuse throughout our game anytime we need a cooldown. Take care. Stay awesome.
https://blog.justinhhorner.com/adding-a-player-attack-cooldown
CC-MAIN-2022-27
refinedweb
361
65.42
what the cycling people don't do well is self-promotion. I hadn’t realized that Gregory Taylor’s new CD has now been released (or maybe I just missed the announcement?): Highly recommended! brad On 9 May 2006, at 14:41, Bradford Garton wrote: > Highly recommended! Yep – must snarf myself a copy. radiaL sends me crosseyed after a while, but people make some amazing music with it. — N. nick rothwell — composition, systems, performance — http:// Bradford Garton wrote: > is self-promotion. I hadn’t realized that Gregory Taylor’s new CD has > now been released (or maybe I just missed the announcement?): It’s on the front page of our site and was announced in a mass mailing to several thousand folks along with UpMix on Friday, so… Oh yeah, we missed the mass telepathic broadcast. I’ll be sure to raise hell at the next staff meeting. Thanks for the prod ;-) w Is there a way to listen to, at least, one whole title ? this kind of music doesn’t fit for 30 sec shots (anyway, which does ?). If you’re affraid to be downloaded, take a look at this perfect light flash player i use myself : h-mp3-leger-comme-une-plume It’s french (yeah !) so it’s in french but a google on "DewPlayer" will do the trick. Creative Commons license too. Easy to integrate, fast, handy, discrete… best f.e ps: btw, Gregory, i thought you were playing free jazz music ;-) f.e chanfrault | aka | personal computer music > >>>>>> > >>>>>> |sublime music for a desperate people| I shall strive to throw off the confluence of my southern birthright [don’t put on airs] and my midwestern socialization [don’t think you’re anything special] especially for this occasion. There are several short excerpts from VJ on the c74 webpage for the disc. Like all the other releases, they are short MP3s. The actual performance is a continuous single-take live set, so I suppose you could take the MP3s, import them into buffers, and write a patch that performed spectral interpolations between them over long periods of time. 
:-) >ps: btw, Gregory, i thought you were playing free jazz music ;-) mais oui, although after a bruising bar fight with one of the Marsalis boys, I have promised to refer to it as free improv, and to avoid the term "music." The recording on Voiceband Jilt occurred within hours of a Kraakgeluiden performance with Jorrit Dykstra, Andrew Neumann, and Stephie Buttrich that you might have enjoyed. Thanks for your interest. Brad Garton, who started this whole embarrassing exchange off in the first place, has – unaccountably – spent quite a lot of time using other recordings of mine as source material for his own recent works. I have no idea why, but refuse to argue with jaw-droppin’ heart-stoppin’ hip=hoppin’ good fortune. You can find them, and things about them here: tary.html T.S. Eliot dedicated "The Wasteland" to Ezra Pound with the epigram "il miglior fabbro" [the better workman]. Brad makes me sound really good, probably better than I sound normally. Let’s talk about something else now, okay? Forums > MaxMSP
https://cycling74.com/forums/topic/what-the-cycling-people-dont-do-well/
CC-MAIN-2016-36
refinedweb
524
71.14
Hi, I'm working on a game (using MIDP, jsr-226) for nokia S40, and I met some problem with the sound. Especially I've noticed a few differences between the phone and the simulator. -- My biggest problem consists of an additionnal noise (crackling, or beep sometimes (this noise can change for the same wav)) played after my original wavs. My wavs match with the recommendation ( 8bit , 8kHz , mono, PCM). And this noise appears only on the phone (nokia 6280 (S40)) not on the simulator. -- Second problems, obviously my sounds slow down the game. with lag before playing the sound. And I've not seen the cause in my (classical) code : --- Last problem : the simulator can play a background music in Midi and my special effects (collision by example) in same time, unlike the phoneCode:public class ShotSounds implements PlayerListener{ // for playing sounds (fired shots ) private Player shotPlayers; public ShotSounds(){ // load fired shot sounds shotPlayers = loadSound("/sounds/wood.wav"); private Player loadSound(String fn) // load fn sound { Player p = null; try{ InputStream in = getClass().getResourceAsStream(fn); p = Manager.createPlayer(in,"audio/x-wav"); p.realize(); p.prefetch(); // move player to PREFETECHED state } catch(Exception ex) { System.out.println("Could not load sound in " + fn); } return p; } public void playShotSound(){ playSound( shotPlayers ); } If anyone has met one of these problems and may be few solutions i'm waiting for your testimonies.
http://developer.nokia.com/Community/Discussion/showthread.php/81707-Sound-and-S40
CC-MAIN-2013-48
refinedweb
259
53
Each Answer to this Q is separated by one/two green lines. I have some tests in Python written in unittest. I want to check that some of my dictionaries contain at least certain attributes equal to certain values. If there are extra values, that would be fine. assertDictContainsSubset would be perfect, except that it’s deprecated. Is there a better thing that I should be using or should I just recursively assert the contents to be equal if they are in the target dictionary? The docs recommend using addTypeEqualityFunc, but I do want to use the normal assertEqual for dicts in some cases. On Python 3.9+, use the dictionary union operator. Change assertDictContainsSubset(a, b) to assertEqual(b, b | a) On older versions of Python, change it to assertEqual(b, {**b, **a}) Note the order of the arguments, assertDictContainsSubset put the “larger” dictionary ( b) second and the subset ( a) first, but it makes more sense to put the larger dictionary ( b) first (which is why assertDictContainsSubset was removed in the first place). This creates a copy of b then iterates over a, setting any keys to their value in a and then compares that result against the original b. If you can add all the keys/values of a to b and still have the same dictionary, it means a doesn’t contain any keys that aren’t in b and all the keys it contains have the same values as they do in b, i.e. a is a subset of b. If you were testing if dict A is a subset of dict B, I think I would write a function that tries to extract the content of dict A from dict B making a new dict C and then assertEqual(A,C). def extractDictAFromB(A,B): return dict([(k,B[k]) for k in A.keys() if k in B.keys()]) then you could just do assertEqual(A,extractDictAFromB(A,B)) Extending on @bman’s answer, exploiting that the comparison operators for set-like objects are overloaded as subset operators, you can use assertGreaterEqual for (arguably) better error messages. 
Compare the two tests: import unittest class SubsetTestCase(unittest.TestCase): def test_dict_1(self): a = {1: 1, 2: 2} b = {1: 2} self.assertTrue(a.items() >= b.items()) def test_dict_2(self): a = {1: 1, 2: 2} b = {1: 2} self.assertGreaterEqual(a.items(), b.items()) unittest.main() The result is: ====================================================================== FAIL: test_dict_1 (__main__.SubsetTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "test.py", line 9, in test_dict_1 self.assertTrue(a.items() >= b.items()) AssertionError: False is not true ====================================================================== FAIL: test_dict_2 (__main__.SubsetTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "test.py", line 15, in test_dict_2 self.assertGreaterEqual(a.items(), b.items()) AssertionError: dict_items([(1, 1), (2, 2)]) not greater than or equal to dict_items([(1, 2)]) ---------------------------------------------------------------------- With assertGreaterEqual, you can see the contents of the two dictionaries from the error message. Andrew has offered a solution that uses assertEqual. But, it is useful for future readers, to know two alternative solutions that are more concise. First one uses issubset method of a set: assert set(A.items()).issubset(set(B.items())) But there is yet another simpler more Pythonic way to do this: set(A.items()) <= set(B.items()) The pitfall of the second solution is that you would not know which keys of the superset are missing from the subset. However, both solutions would fail if your values have unhashable variables (such as dict) inside them.
https://techstalking.com/programming/python/python-unittests-assertdictcontainssubset-recommended-alternative-duplicate/
CC-MAIN-2022-40
refinedweb
578
64.1
0 Replies Latest reply on Feb 28, 2013 4:26 PM by 994002 Inner class inconsistency between signature and descriptor. 994002 Feb 28, 2013 4:26 PM Hi all! I'm trying to understand if there's a reason in the spec for the discrepany between java descriptors and signatures for inner classes. (I'm looking directly at the content of the class files here, but I use javap to illustrate). ( n.b. I have tried this on JDK 1.6.0_33 and 1.7.0_05, both have the same issue when viewed with javap from Java 7 - java 6's javap doesn't seem to show any generic signature info ) Consider: public class InnerClassTest1 { public int getX() { return new Inner1(new ArrayList<String>()).getX(4); } public class Inner1 { private final List arg; public Inner1(List arg) { this.arg = arg; }.... and the same, with generics on the inner class public class InnerClassTest2 { public int getX() { return new Inner1(new ArrayList<String>()).getX(4); } public class Inner1<E> { private final List<E> arg; public Inner1(List<E> arg) { this.arg = arg; }..... If you look at the output of javap -cs [ USING JAVA 7 RUNTIME ] on the inner classes, they're surprisingly different! public org.benf.cfr.tests.InnerClassTest1$Inner1(org.benf.cfr.tests.InnerClassTest1, java.util.List); Signature: (Lorg/benf/cfr/tests/InnerClassTest1;Ljava/util/List;)V vs public org.benf.cfr.tests.InnerClassTest2$Inner1(java.util.List<E>); <--- Generic signature missing the implicit outer class Signature: (Lorg/benf/cfr/tests/InnerClassTest2;Ljava/util/List;)V <--- non generic descriptor based signature is correct. Using java 6, they both look the same, but javap doesn't show generics info. Looking at the classfile / constpool / signature attributes, I can see that in the latter case the DESCRIPTOR for <init>Inner1 is (Lorg/benf/cfr/tests/InnerClassTest2;Ljava/util/List;)V but the SIGNATURE (i.e the signature attribute on the method) for the same is *(Ljava/util/list<TE;>)V* , which is wrong. 
I think that when javac builds the signatures for inner classes, it is skipping the implicit argument for the outer class's this pointer. contains sample files, and output when viewed in JAVAP under java 7. Does anyone know what's going on here? I have the same question Show 0 Likes (0) This content has been marked as final. Show 0 replies Actions
https://community.oracle.com/message/10881992?tstart=0
CC-MAIN-2016-44
refinedweb
394
56.35
Yup, it does. Nice detective work! On Mon, Jul 13, 2015, 8:58 AM Britton Smith brittonsmith@gmail.com wrote: Your tip led me to the right answer. The call to parallel_objects was happening in the derived quantity, where each processor is being made into its own comm where it is rank 0. The issue is that they then try to identify fields and incorrectly think of themselves as rank 0 for choosing which grids to look at. If I simply as ds.index right after creating the dataset, the problem goes away. This should probably just be added to the bottom of the __init__ for EnzoDatasetInMemory. Does that sound right? Britton On Mon, Jul 13, 2015 at 2:38 PM, Matthew Turk matthewturk@gmail.com wrote: That sounds like a new communicator got pushed to the top of the stack when it should not have been, perhaps in a rogue parallel_objects call. On Mon, Jul 13, 2015, 8:35 AM Britton Smith brittonsmith@gmail.com wrote: Hi again, Maybe this is a clue. In _generate_random_grids, self.comm.rank is 0 for all processors, which would explain why N-1 cores are trying to get grids that don't belong to them. Interestingly, mylog.info prints out the correct rank for each of them. Britton On Mon, Jul 13, 2015 at 2:21 PM, Britton Smith brittonsmith@gmail.com wrote: Hi Matt, Thanks for your help. Adjust by grid._id_offset did not work, but I can that what is happening is that all processors are trying to call _read_field_names using grid 1, when only processor 0 owns that grid. I will look into why now, but if you have any intuition where to check next, that would be awesome. Thanks, Britton On Mon, Jul 13, 2015 at 1:51 PM, Matthew Turk matthewturk@gmail.com wrote: Hi Britton, What looks suspicious to me is the way it's using grid.id. This might lead to an off-by-one error. Can you try it with grid.id-grid._id_offset and see if that clears it up? 
On Mon, Jul 13, 2015 at 7:42 AM, Britton Smith brittonsmith@gmail.com wrote: Hi all, I've recently been trying to use yt's inline analysis functionality with Enzo and am having some difficulty getting it to work in parallel. I am using the development tip of yt. In serial, everything works fine, but in parallel, I get the following error: It seems that the issue is that yt is not correctly identifying which grids are available on a given processor for the EnzoDatasetInMemory object. Does anyone have an idea of how to fix this? Has anyone else seen this? For reference, my user_script is just this: import yt from yt.frontends.enzo.api import EnzoDatasetInMemory def main(): ds = EnzoDatasetInMemory() ad = ds.all_data() print ad.quantities.total_quantity("cell_mass") Thanks for any help, Britton
https://mail.python.org/archives/list/yt-users@python.org/message/6XE4C3N5VUUGGFSAEIY6SFXTZE4T4YQ7/
CC-MAIN-2019-35
refinedweb
481
67.86
poker library offers many new and unique features that make analysis of Texas Holdem much easier. In this article, you will find the following topics covered: Part 2 will cover these topics: If you are reading this article, I'm sure you've read a book or two on Holdem. One of the things I've noticed while reading poker books is that there is a de facto standard for describing pocket cards. Most books use some variant of the following to describe pocket hands. Pocket Hand Description Language Example Description Ac Kd Specific cards are specified using letters and numbers for ranks such as 'A'-ace, 'K'-king, 'Q'-queen, 'J'-jack, 'T'-10, '9'-nine and so on. Suits are described usually in lower case as 'c'-clubs, 's'-spades, 'h'-hearts and 'd'-diamonds. AKs, 78s Card combinations where only the card ranks are specified and the 's' indicates they are suited. QJ, T8 Card combinations where the card ranks are specified and the cards' suits are unsuited or offsuit. Kx, Ax This notation indicates a card rank and another unknown card which is less than 9 and doesn't form a pair. Most poker software that parses string representations of pocket hands only supports the first item in the de facto standard. I've implemented a much richer query language. I support all of the common poker book syntax, plus some extensions. The Extended Pocket Hand Description Language Group1 Any starting hand in Sklansky group1. Group1 through Group8 are supported. Suited Any set of pocket cards that are of the same suit. Offsuit Any set of pocket cards that are of different suits. Connected Any pocket card set, such as AK, that are adjacent in rank. Gap1 Any pocket card set, such as AQ, which have a 1-card gap. Gap2 Any pocket card set, such as AJ, which have a 2-card gap. Gap3 Any pocket card set, such as AT, which have a 3-card gap. I've also added some operators. Pocket Hand Description Language Basic Operators Group2 To Group5 Sklansky groups can be specified as a range. 
Ranges use the '-' character or the word 'to' or the word 'Through' Group3 And Offset The intersection operator can be used with any pair of expressions. The resulting set is the intersection of the two expressions. The keywords: 'And', '&' and 'Intersection' can all be used to represent this operation. Group1 Or Group3 The union operator can be used with any pair of expressions. The resulting set is the union of the two expressions. The keywords 'Or', '|' and 'Union' can all represent this operation. Not Offset The 'Not' operator returns the 1326 set of possible pocket cards minus the items defined in the expression on the right. (AK* | AA) & Offset Parenthesis may be used to group operations. One of the things I've done too many times has been to write code to analyze specific match-ups. For example, have you ever wondered what the advantage is to having a suited connector versus a non-suited connector? I'd guess that you've probably wondered, but weren't enough of a masochist to write the code. On the other hand, I am enough of a masochist to write the code for many, many match-ups. After awhile, I decided I'd had enough of that and wrote a query language so that I could write my match-up analysis once and just put in query strings. The following is an example of the result of using query strings rather than hard coded match-ups. Oh, and there is about a 5% advantage for suited connectors. I've attempted to make it trivial to write analysis code that utilized Pocket Queries. Here's an example. using System; using System.Collections.Generic; using System.Text; using HoldemHand; namespace ConsoleApplication1 { class Program { static void Main(string[] args) { // A Pocket Query Returns an array of all // hands that meet the criterion. ulong[] player1 = PocketHands.Query("Connected Suited"); ulong[] player2 = PocketHands.Query("Connected Offsuit"); // Holds stats long player1Wins = 0, player2Wins = 0, ties = 0, count = 0; // Iterate through 10000 trials. 
for (int trials = 0; trials < 10000; trials++) { // Pick a random pocket hand out of // player1's query set ulong player1Mask = Hand.RandomHand(player1, 0UL, 2); // Pick a random pocket hand for player2 ulong player2Mask = Hand.RandomHand(player2, player1Mask, 2); // Pick a random board ulong boardMask = Hand.RandomHand(player1Mask | player2Mask, 5); // Create a hand value for each player uint player1HandValue = Hand.Evaluate(boardMask | player1Mask, 7); uint player2HandValue = Hand.Evaluate(boardMask | player2Mask, 7); // Calculate Winners if (player1HandValue > player2HandValue) { player1Wins++; } else if (player1HandValue < player2HandValue) { player2Wins++; } else { ties++; } count++; } // Print results Console.WriteLine("Player1: {0:0.0}%", (player1Wins + ties / 2.0) / ((double)count) * 100.0); Console.WriteLine("Player2: {0:0.0}%", (player2Wins + ties / 2.0) / ((double)count) * 100.0); } } } Notice that I've added a new class to the Holdem library. It's called PocketHands. Here are some simple examples of how to use this class: PocketHands The simplest way to use this class is to iterate through all possible pocket hands for the specified Pocket Query. // This will iterate through all the possible "connected suited" pocket hands foreach (ulong pocketmask in PocketHands.Query("Connected Suited")) { // Insert calculation here. } Another way to use this class is to iterate through a Pocket Query given a specific hand match-up. // Looks at an AKs match up (specifically As Ks) against all possible // opponents hands that are connected and suited. ulong mask = Hand.Evaluate("As Ks"); // AKs foreach (ulong oppmask in PocketHands.Query("Connected Suited", mask)) { // Insert calculation here. } This example loops exhaustively through two specific Pocket Query match-ups. // Iterates through all possible "Connected Suited" versus // "Connected Offsuit" match ups. 
foreach (ulong playermask in PocketHands.Query("Connected Suited")) { foreach (ulong oppmask in PocketHands.Query( "Connected Offsuit", playermask)) { foreach (ulong board in Hand.Hands(0UL, playermask | oppmask, 5)) { // Insert Calculation Here } } } It's also possible to use pocket queries while doing random samples of match-ups. // Randomly selects 100000 possible hands when player starts with a // suited connector ulong[] masks = PocketHands.Query("Connected Suited"); for (int trials = 0; trials < 100000; trials++) { // Select a random player hand from the list of possible // Connected Suited hands. ulong randomPlayerHandMask = Hand.RandomHand(masks, 0UL, 2); // Get a random opponent hand ulong randomOpponentHandMask = Hand.RandomHand(randomPlayerHandMask, 2); // Get a random board ulong boardMask = Hand.RandomHand(randomPlayerHandMask | randomOpponentHandMask, 5); // Insert evaluation here } Most Hold'em players know what outs are. According to Wikipedia: [A]n out is any unseen card that, if drawn, will improve a player's hand to one that is likely to win The question for the programmer is, "Is this definition sufficient to write a function that returns the cards that are outs?" Let's look at the two key points made by Wikipedia. They are: Let's start by writing a function that meets the first criterion.
static ulong OutsMask(ulong pocket, ulong board) { ulong retval = 0UL; ulong hand = pocket | board; // Get original hand value uint playerOrigHandVal = Hand.Evaluate(hand); // Look ahead one card foreach (ulong card in Hand.Hands(0UL, hand, 1)) { // Get new hand value uint playerNewHandVal = Hand.Evaluate(hand | card); // If the hand improved then we have an out if (playerNewHandVal > playerOrigHandVal) { // Add card to outs mask retval |= card; } } // return outs as a hand mask return retval; } } } Passing this starting hand A♠ A♣, K♦ 8♥ 9♣ into our new method returns the following outs: I think most people would agree that super-sizing your kicker probably doesn't help much here. I think most people would also agree that improving the board doesn't help either. So, let's add two more rules: The following example handles these new rules and allows opponent hands to be added. static ulong OutsMask(ulong pocket, ulong board, params ulong[] opponents) { ulong retval = 0UL; // Get original hand value uint playerOrigHandVal = Hand.Evaluate(pocket | board); // Look ahead one card foreach (ulong card in Hand.Hands(0UL, board | pocket, 1)) { // Get new hand value uint playerNewHandVal = Hand.Evaluate(pocket | board | card); // Get new board value uint boardHandVal = Hand.Evaluate(board | card); // Is the new hand better than the old one? bool handImproved = playerNewHandVal > playerOrigHandVal && Hand.HandType(playerNewHandVal) > Hand.HandType( playerOrigHandVal); // This compare ensures we move up in hand type.
bool handStrongerThanBoard = Hand.HandType(playerNewHandVal) > Hand.HandType( boardHandVal); // Check against opponents cards bool handBeatAllOpponents = true; if (handImproved && handStrongerThanBoard && opponents != null && opponents.Length > 0) { foreach (ulong opponent in opponents) { uint opponentHandVal = Hand.Evaluate(opponent | board | card); if (opponentHandVal > playerNewHandVal) { handBeatAllOpponents = false; break; } } } // If the hand improved then we have an out if (handImproved && handStrongerThanBoard && handBeatAllOpponents) { // Add card to outs mask retval |= card; } } // return outs as a hand mask return retval; } } } The problem with outs calculation is that you often don't know the opponent cards you are up against. That makes this calculation a bit subjective. I've had many discussions with different folks about this. One of the more interesting discussions was with Matt Baker. He rewrote my outs function (shown above) to include a heuristic that tries to more accurately account for opponent cards. I won't go into that here, but you can use his code. He generously provided OutsDiscounted and OutsMaskDiscounted. OutsDiscounted OutsMaskDiscounted static int OutsDiscounted(ulong player, ulong board, params ulong[] opponents) The OutsDiscounted function returns the number of outs for the specified player's pocket hand, the current board and optionally the opponent pocket hands. ulong OutsMaskDiscounted(ulong player, ulong board, params ulong[] opponents) The OutsMaskDiscounted function returns a card mask of all of the cards that are outs. You can turn this mask into a string by calling MaskToString(). MaskToString()
The StraightDrawCount method returns the number of straight draws that are possible for the player, board and dead card configuration. It also filters the results so only player hand improvements are counted. StraightDrawCount public static int StraightDrawCount(ulong player, ulong board, ulong dead) The IsOpenEndedStraightDraw function returns true if the combined mask is an open-ended straight draw. Only straight possibilities that improve the player's mask are considered in this method. IsOpenEndedStraightDraw public static bool IsOpenEndedStraightDraw(ulong pocket, ulong board, ulong dead) The method IsGutShotStraightDraw returns true if the combined cards contain a gut shot straight draw. IsGutShotStraightDraw public static bool IsGutShotStraightDraw(ulong pocket, ulong board, ulong dead) The method IsStraightDraw returns true if the combined cards contain a straight draw. IsStraightDraw public static bool IsStraightDraw(ulong pocket, ulong board, ulong dead) The method IsOpenEndedStraightDraw returns true if the combined cards contain an open-ended straight draw. This method counts the number of hands that are a flush with one more drawn card. However, only flush hands that improve the board are considered. public static int FlushDrawCount(ulong player, ulong board, ulong dead) This method returns true if there are 4 cards of the same suit. public static bool IsFlushDraw(ulong pocket, ulong board, ulong dead) This method returns true if there are three cards of the same suit. The pocket cards must have at least one card in that suit. public static bool IsBackdoorFlushDraw(ulong pocket, ulong board, ulong dead) This method returns the number of draws that are possible for the specified HandType. It only returns the counts that improve the player's mask, rather than just the board. HandType public static int DrawCount(ulong player, ulong board, ulong dead, Hand.HandTypes type) I've included several demo programs and examples in the downloadable project. 
This is a quick summary of each of the demo applications: This application graphs several interesting values to allow hand/board scenarios to be evaluated against 1 through 9 opponents. This demo application accepts Pocket Query descriptions for the Player Pocket field. These values include: This application allows a player pocket hand and an opponent pocket hand definition -- using the Pocket Query Language -- to be entered along with a board definition. The resulting odds are returned for a single random opponent. Since this takes the Pocket Query Language as input, it makes it very easy to try all kinds of pocket hand scenarios that would otherwise require a fair amount of programming to determine the answer. DiscountedOuts I could easily have spent another 6 months editing and tweaking the code and examples. I decided that I had spent enough time on this and had a "critical mass" of features available. So, here is my current code, warts and all. I don't claim to be a great poker player or even a good one, for that matter. To paraphrase: those who can, do; those who can't, code. I don't recommend taking any poker advice I may have given (or implied to have given) in this article without independent verification. I'd also like to encourage feedback. Poker programmers appear to be a somewhat secretive lot, for good reason I'm sure. I've learned a lot from the feedback I've been given over the last year or so. Please keep it coming. First released May.
http://www.codeproject.com/Articles/19091/More-Texas-Holdem-Analysis-in-C-Part-1/?fid=426176&df=90&mpp=10&sort=Position&tid=3608080
CC-MAIN-2015-11
refinedweb
2,209
56.76
Created 10-02-2015 04:02 PM Currently, seems Livy has hardcoded the file path to be hdfs if running against yarn mode according to the following code. def filesystemRoot(): String = { Configuration cE = new HdfsConfiguration(false); sessionKind() match { case Process() => "file://" case Yarn() => "hdfs://" } } We would like to use wasb:// as default but want to make this generic so it can be easily extended to even other FS. We would like to seek advice on how this should be done. Option 1: Hadoop has a core-site.xml which lives in the HADOOP_CONF_DIR. It has a deafultFS setting which we can use here. Option 2: Introduce a new config value in livy-defaults.conf (say livy.yarn.fs) which points to the default fs to be used. We can default it to hdfs if not configured. Which is a more preferrable option? Thanks Lin
https://community.cloudera.com/t5/Support-Questions/Introduce-a-new-file-system-in-Livy/td-p/32613
CC-MAIN-2019-43
refinedweb
144
76.01
Often I see python programmers in python2 writing code like print ('Hello World') claiming “this will work in both python 2 and 3!” This is a noble pursuit, but the approach is misguided. In python3 the above behaves as a call to the print() function with a single argument, in python2 it is a print statement followed by a parenthesized expression. Once there are two arguments though, it fails print ('Hello', 'World') Under python3 this works as expected, but under python2 there is only one object following the print, and that object is a tuple. This will print the tuple as-is ('Hello', 'World') So this piece of common misinformation is easily dismissed, the above is by no means universal since we can’t have multiple arguments. A real attempt at a universal print function means there can only be one object that is printed. The next step is to use join. print (' '.join(('Hello', 'World'))) Great! call join with a tuple of ‘Hello’ and ‘World’, the separator space will do what printing normally does, then wrap that in parentheses to get back to step one (something that works with the print function). We also get the ability to use a custom separator when python2’s print only allows a space. It’s starting to seem like a lot of different things happening and that leaves room for human error. But there is a bigger problem. What happens when something other than a string is passed? print (' '.join(('Hello', 'World', 2))) join only works with strings, so now we have the error TypeError: sequence item 2: expected string, int found but that’s easily fixed by tossing a generator expression inside the join call to convert each item in the tuple to as string. print (' '.join(str(e) for e in ('Hello', 'World', 2))) Voila! 
a cross-python print function in just 5 easy steps - put all of your arguments inside a tuple - wrap that in a generator expression to convert them all to strings - put that inside a call to ' '.join() - wrap that with parentheses - insert a print in front Though there are two problems that remain: printing without a newline and printing to a file. Printing to a file can be achieved by changing stdout during the print call. Assuming we have a file object f out, sys.stdout = sys.stdout, f print (' '.join(str(e) for e in ('Hello', 'World', 2))) sys.stdout = out Change stdout to be f during the print call, then change it back after. I would strongly recommend putting that inside of a context manager (which exists in python3.4 stdlib) in case printing raises an exception. If you can figure out a way to equate end='' with a trailing comma let me know. Though at this level of complexity you might as well be using sys.stdout.write since getting equality means throwing out most of the convenience that print provides. What you should take away from this is that there isn't a simple way to write print syntax that works with both python2 and python3. If it's available, in python2 I generally recommend from __future__ import print_function so you can just use the real python3 print syntax in python2. If it's not, then write your own print function. If none of this is an option just use one or the other! 2to3 can convert someday if you ever need to. If you're considering using what I've demonstrated here, please realize that this post is meant to show that trying to accomplish a universal print syntax is a mess. Don't do this in real code.
https://makecleanandmake.com/category/programming/
CC-MAIN-2022-05
refinedweb
604
70.53
I was going through my bug list today and noticed this bug, Document how to embed fonts in FX apps. A quick google search turned up a single forum posting on the subject which just linked back to the root bug (lucky for me I put the solution in the bug report so people weren’t completely stuck). Surprisingly, I couldn’t find any documentation on the subject here at FXExperience.com, so I thought to rectify that with a short post explaining the subject. I want to first say that the technique I’m about to describe is supported on all three platforms. This technique requires bundling the font files with your jars. So be sure that you have the rights to redistribute your chosen font files in this manner. We are looking at the CSS 3 specification for web fonts and hope to be able to support it fully when it goes final. Within your application JAR file is a META-INF directory. It is not uncommon for frameworks to put special properties files or other files into this directory. At runtime, they read these files to bootstrap themselves. This is essentially what we are doing in JavaFX. To embed a TrueType font with a JavaFX application, you need to: - Add the ttf file to some directory in your source tree - Create a fonts.mf file - Add lines to the fonts.mf file mapping a “logical name” to the path of the embedded ttf file - Modify your build process to copy the fonts.mf file into the META-INF of your final JAR file - In your JavaFX Script file, simply create a font by name referring to the “logical name” you entered in your fonts.mf file. The fonts.mf file is nothing more than a properties file, mapping logical names to actual font files. The reason we took this approach as opposed to simply allowing you to load a font from an input stream, is that such dynamic font loading was not supported on the MIDP based JavaFX mobile implementation. For this tip, I’ve chosen to use one of the fonts which are free for non-commercial use at 1001 Free Fonts. 
It is called Birth of a Hero. You can just download that file, and extract the ttf file into a directory of your choosing. I created a project called “Hero” which consists of a single package also called “hero” (note the lower case). I extracted the “BIRTH OF A HERO.ttf” file into that package. I then created the following fonts.mf file and put it in my projects root directory (using NetBeans, I did this by using the “Files” tab). Hero=/hero/BIRTH\ OF\ A\ HERO.ttf I could have renamed the TTF file to be something without spaces, but decided for the sake of example to show also how to deal with spaces in file names in the fonts.mf file. Note the leading slash on the path. This is very important. I next had to modify my build script to make sure fonts.mf gets placed in the application JAR file. In my NetBeans project, I did this by using the following: <target name="-pre-compile" depends="init"> <mkdir dir="${build.dir}/compiled/META-INF"/> <copy todir="${build.dir}/compiled/META-INF" file="fonts.mf" /> </target> Now that I have the ttf file and fonts.mf in my jar, I simply have to use the font from JavaFX: Label { text: "Birth of a Hero" font: Font { name:"Hero" size: 128 } } Now when I run this application, I get a warning message printed which says “Failed to get style for font ‘Hero'”. You can safely ignore this, but it is annoying and actually it is telling us something useful. If you inspect the style of the Font, you would find that there is not one. If you would like a style, you need to build it into the name. For example, lets change the name of our font from “Hero” to “Hero Regular”. We will then find that the style of the font is “Regular” and the warning message goes away. 
Hero\ Regular=/hero/BIRTH\ OF\ A\ HERO.ttf import javafx.scene.text.Font; import javafx.scene.control.Label; Label { text: "Birth of a Hero" font: Font { name: "Hero Regular" size: 128 } } And here is our finished product: While trying to do this in our FX applicaction, I noticed that this works only with only true type fonts (*.ttf). If I use a open type font (*.otf), it throws a FontFormatException: Unsupported sfnt. Will be good for your readers if they knew this before trying with otf. Hope it helps! -Dhruva I’m trying to include a custom font in my application. When I test-run the code from within netbeans everything looks great. However, if I deploy it (or even run it from webstart from within netbeans) the fonts are not loaded and are replaced with a generic font. I made a test class that prints all the fonts returned by Font.getFamilies(). It shows the custom fonts when I run it normally, but not when I run it through webstart. Can anyone tell me why webstart can’t see the fonts? Is there any way we can get NetBeans screenshots? I tried following your instructions, but it’s not working for some reason. Thanks, Jon This doesn’t work with newer JRE’s, It works fine with me with my jdk 1.6.14 but using 1.6.21 any custom fonts I use are ignored. Does anyone know of some work arounds? I created an issue to track the bug with custom fonts not working in applets or net-start: RT-9667. The issue has a zipped up netbeans project all setup as described in this article. I hope it helps someone resolve the issue. Another way to accomplish this (with JavaFX 2.1 and Jdk 7.0.09) is to put the ttf in the same location as your fxml application class, and use IO’s InputStream… Button newButton = new Button(“OpenSans Bold”); InputStream is = this.getClass().getResourceAsStream(“OpenSans-Bold.ttf”); newButton.setFont(Font.loadFont(is, 12)); how i add hindi font in javafx text filde Hello, I’m working with Netbeans 6.9.1 and JavaFX 1.3. 
Can someone help me with my schoolproject? I’m trying to get a ‘var number’ into a stage -> label text. var number:String; doesn’t work Thanks, Jack
http://fxexperience.com/2010/05/how-to-embed-fonts/
CC-MAIN-2020-16
refinedweb
1,079
74.29
Details - Type: Improvement - Status: Closed - Priority: Minor - Resolution: Fixed - Affects Version/s: 2.4.1 - - Component/s: core/index - Labels:None - Lucene Fields:New Description For things like merging field caches or bitsets, it's useful to know which segments were merged to create a new segment. Activity - All - Work Log - History - Activity - Transitions Yep! Shouldn't we make that method package private? (Since it's arg (MergePolicy.OneMerge.*) is package private). Added a protected IW.mergeSuccess method. We can't really do much more right now until we wrap SegmentInfo with a public class or make it public. But you'll still need access to package-private stuff, eg MergePolicy.OneMerge? We can make it protected that way it's expert level and a user needs to inherit from IndexWriter to use it. I don't think today it's possible to simply inherit from IW to get the merge information because IW.merge is final, and there needs to be a way to know the merge was successful. Well, I'm worried about how much [now package private] info you're gonna need about the merging segments – it doesn't seem like a simple change to me. Can we put this one in 2.9? It seems like a fairly straightfoward change. Or make it a protected method? Moving out. The problem is you need more information than simply "these segments got merged" to actually do something interesting with your caches. Okay, now I've thought a bit. What we need is a notification on which segments remained, which are new and which got toasted, plus docid ranges for them. Their ancestry is irrelevant, because you're right, to exploit it we also need deleted docs, and then replicate some of the merging logic and it gets really messy from here. Dropping parts of the cache related to dead segments, rebasing survivors and doing a fair-and-square load/uninversion/whatever for new ones is enough. Can you explain what's missing in Lucene's FieldCache? It's not that easy to say. 
Our version was initially used only for sorting, but without concurrency issues and with async warmup. But then we used it to load docs (way better than storing fields and using IndexReader.document), tied up with our strongly-typed-fields code, added handling for multi-valued fields, used it for faceted searches. So now it is essentially just something different from Lucene field cache. This is required in one form or another for any kinds of segment-aware caches. The problem is you need more information than simply "these segments got merged" to actually do something interesting with your caches. EG you'd need to know which deleted docs got zapped, right? We're currently using our own field cache (I doubt we'll ever switch back to lucene's native, fixed one or not) Can you explain what's missing in Lucene's FieldCache? (Since we are going to build a new one for LUCENE-831 it'd be great to address all known limitations...). .bq I'd like to step back and understand the wider use case / context that's driving this need (to know precisely when segments got merged) This is required in one form or another for any kinds of segment-aware caches. We're currently using our own field cache (I doubt we'll ever switch back to lucene's native, fixed one or not) and filter cache. Both caches are warmed up on reopen, asynchronously from search requests and both would warm up considerably faster if we have data on how segments have changed. Jason once LUCENE-1516 is in, can you repost this patch? It's hard to see what's new, here. I think it's good to take a step back, "if we fix Lucene's field cache, and Lucene's near real-time search manages CSF's efficiently in memory" fixes the use case. Relying on CSF coming in probably won't help this the case if it doesn't make it into the 2.9 release. I like the callback method because it does not rely on passing segment infos around and instead uses the already public IndexReader classes. 
I'd like to step back and understand the wider use case / context that's driving this need (to know precisely when segments got merged). EG if we fix Lucene's field cache, and Lucene's near real-time search manages CSF's efficiently in memory, does that address the use case behind this? It's possible that we should simply make SegmentInfo(s) public, so that MergePolicy/Scheduler can be fully created external to Lucene, and track all specifics of why/when merges are happening. But those APIs have a high surface area, and we do make changes over time. I would like to move away from our current position of somewhat closed APIs that require user classes be a part of the Lucene packages. It's always best to reuse existing APIs, however we've migrated to OSGi which means anytime we need to place new classes in Lucene packages, we need to rollout specific JARs (I think, perhaps it's more complex) for the few classes outside of our main package classes. This makes deployment of search applications a bit more difficult and time consuming. A related thread regarding MergePolicy is at: I think this can be achieved, today, by making your own MergeScheduler wrapper, or by subclassing ConcurrentMergeScheduler and eg overriding the doMerge method? If so, I'd prefer not to add a callback to IW. Patch is combined with LUCENE-1516. IndexWriter has a setSegmentMergerCallback method that is called in IW.mergeMiddle where the readers being merged and the newly merged reader are passed to the SMC.mergedSegments method. I think we need to expose the SegmentReader segment name somehow either via IndexReader.getSegmentName or an interface on top of SegmentReader? OK I committed it! Thanks Jason!
https://issues.apache.org/jira/browse/LUCENE-1584?focusedCommentId=12696445&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel
CC-MAIN-2015-35
refinedweb
997
71.44
import java.util.Scanner;
import java.util.Random;

/**
 * Builds and prints a 3x9 bingo ticket.
 *
 * Rules implemented:
 *  - column 0 holds numbers 1-9, column i (1..7) holds 10*i .. 10*i+9,
 *    and the last column holds 80-90 (the 90 is placed there as well);
 *  - every row contains exactly 5 numbers and 4 blank (0) cells.
 *
 * The original version ran its 5-number random placement loop inside the
 * per-cell printing loop, so placement executed 9*5 = 45 times per row and
 * filled almost every cell. Placement now happens once per row, before
 * printing, and is guaranteed to hit 5 distinct columns.
 */
public class demo {

    public static void main(String[] args) {
        int[][] ticket = buildTicket(new Random());

        // print the ticket in rectangular form; 0 marks a blank cell
        for (int[] row : ticket) {
            for (int value : row) {
                System.out.print("\t" + value);
            }
            System.out.println("");
        }
    }

    /**
     * Fills a 3x9 ticket so each row has exactly 5 numbers in 5 distinct
     * columns, each number drawn from its column's legal range.
     *
     * @param rand source of randomness (injected so results are testable)
     * @return the populated ticket; empty cells remain 0
     */
    static int[][] buildTicket(Random rand) {
        int[][] ticket = new int[3][9];
        for (int row = 0; row < ticket.length; row++) {
            int placed = 0;
            // Rejection sampling over the 9 columns guarantees 5 distinct
            // columns per row; a cell value of 0 means "still blank", and no
            // column's legal range includes 0, so 0 is a safe sentinel.
            while (placed < 5) {
                int col = rand.nextInt(9);
                if (ticket[row][col] == 0) {
                    ticket[row][col] = randomValueForColumn(rand, col);
                    placed++;
                }
            }
        }
        return ticket;
    }

    /**
     * Returns a random number that is legal for the given column:
     * 1-9 for column 0, 80-90 for column 8, 10*col .. 10*col+9 otherwise.
     * (The original formula col*10 + rand.nextInt(9) + 1 could never
     * produce 10, 20, ..., 90.)
     */
    private static int randomValueForColumn(Random rand, int col) {
        if (col == 0) {
            return rand.nextInt(9) + 1;        // 1..9
        }
        if (col == 8) {
            return 80 + rand.nextInt(11);      // 80..90 (90 lives here too)
        }
        return col * 10 + rand.nextInt(10);    // 10*col .. 10*col+9
    }
}
http://www.javaprogrammingforums.com/whats-wrong-my-code/12438-bingo-ticket-loop-problem-doesnt-compile-like-should.html
CC-MAIN-2013-48
refinedweb
209
69.11
// Simple Note Code
#include <cstdlib>   // for system(); the original omitted this header
#include <iostream>
#include <stack>

using namespace std;

/** Test basic operations of stack.
 *
 * Stacks are a type of container adaptor.
 *
 * Tested functions:
 *
 * empty: bool empty ( ) const;
 * Returns whether the stack is empty, i.e. whether its size is 0.
 *
 * size: size_type size ( ) const;
 * Returns the number of elements in the stack.
 *
 * push: void push ( const T& x );
 * Adds a new element at the top of the stack, above its current top element.
 * The content of this new element is initialized to a copy of x.
 *
 * pop: void pop ( );
 * Removes the element on top of the stack, effectively reducing its size by one.
 * The value of this element can be retrieved before being popped by calling member stack::top.
 * NOTE: calling pop on an empty stack is undefined behavior.
 *
 * top: value_type& top ( ); or const value_type& top ( ) const;
 * Returns a reference to the next element in the stack. Since stacks are last-in first-out
 * containers this is also the last element pushed into the stack.
 * NOTE: calling top on an empty stack is undefined behavior.
 */

// Each helper mutates the caller's stack in place. Passing by reference
// avoids the two full container copies (argument and return value) that
// the original pass-by-value / return-by-value signatures incurred.
void testEmptySizePush(stack<int>& intStack);
void testTop(stack<int>& intStack);
void testPop(stack<int>& intStack);

int main() {
    stack<int> intStack;

    testEmptySizePush(intStack);
    testTop(intStack);
    testPop(intStack);

    system("PAUSE");  // Windows-only console pause; harmless failed command elsewhere
    return 0;
}

// Demonstrates empty(), size() and push(): reports the initial (empty)
// state, pushes 3 then 5, and reports the new state.
void testEmptySizePush(stack<int>& intStack) {
    cout << "function testEmptySizePush" << endl;

    // test empty and size on the untouched stack
    cout << "is intStack empty? " << (intStack.empty() ? "true" : "false") << endl;
    cout << "size of intStack is: " << intStack.size() << endl;

    cout << "push 3 then 5 into intStack" << endl;
    intStack.push(3);
    intStack.push(5);

    // test empty and size again after the pushes
    cout << "is intStack empty? " << (intStack.empty() ? "true" : "false") << endl;
    cout << "size of intStack is: " << intStack.size() << endl << endl;
}

// Demonstrates top(): reads the top element, then mutates it through the
// reference that top() returns. Guarded so top() is never called on an
// empty stack (undefined behavior).
void testTop(stack<int>& intStack) {
    cout << "function testTop" << endl;
    if (!intStack.empty()) {
        cout << "current top value is: " << intStack.top() << endl;
        cout << "add 5 to top value" << endl;
        intStack.top() += 5;
        cout << "modified top value is: " << intStack.top() << endl << endl;
    }
}

// Demonstrates pop(): shows the top value, pops it, then shows the new
// top. Unlike the original, the post-pop top() is guarded too, since the
// pop may have emptied the stack.
void testPop(stack<int>& intStack) {
    cout << "function testPop" << endl;
    if (!intStack.empty()) {
        cout << "top value before pop is: " << intStack.top() << endl;
        cout << "pop top value" << endl;
        intStack.pop();
        if (!intStack.empty()) {
            cout << "top value after pop is: " << intStack.top() << endl << endl;
        } else {
            cout << "intStack is now empty" << endl << endl;
        }
    }
}

// Result
// Reference: stack - C++ Reference
// Download: stack.cpp
http://ben-bai.blogspot.com/2013/07/cpp-stack-practice.html
CC-MAIN-2018-39
refinedweb
380
65.93