text stringlengths 20 1.01M | url stringlengths 14 1.25k | dump stringlengths 9 15 ⌀ | lang stringclasses 4
values | source stringclasses 4
values |
|---|---|---|---|---|
Asked by:
CPU Scheduling Algorithms Simulation
- Anyone can help me on these problem? I am not good in C++.
These are the task to be solved..
Problem:.
And here's the code:
All we need to be solved is the problem above..
Thanks..
#include <iostream>
#include <cstdlib>
#include <cmath>
#define MAX 1000 // max array size (upper bound on processes in any list/queue)
#define Q 1 // quantum time unit to be used by round robin scheduling (algorithm 4; not implemented yet)
#define BUSY 1 // server_status value: CPU is currently serving a process
#define IDLE 0 // server_status value: CPU is free
using namespace std;
//global variables
// One process that has not yet been dispatched: its identity plus the
// randomly generated arrival time and CPU demand.
struct process_type{
int pid; // process id (1-based; assigned sequentially in initialize())
float BT; // CPU burst time (service demand)
float AT; // arrival time
int priority; // priority value (generated and reported, but no implemented algorithm uses it yet)
};
// Per-process statistics, filled in by main() when the process is dispatched
// to the CPU and printed by printstat() at the end of the simulation.
struct stat_type{
int pid; // process id
float BT; // CPU burst time
float AT; // arrival time
int priority; // priority value (copied from the process record)
float ET; // entry time: simulation time at which the process started using the CPU
float TT; // turnaround time: burst time + waiting time
float WT; // waiting time: dispatch time - arrival time (0 if CPU was idle on arrival)
};
process_type process_list[MAX], ready_q[MAX]; // not-yet-arrived processes / FIFO ready queue
stat_type process_stat[MAX]; // per-process statistics, indexed by pid-1
float sim_time; // the simulation clock
float next_arrival; //next arrival time
float next_departure; // next departure time (1.0e+30 sentinel while the CPU is idle)
int server_status; //is the cpu busy or idle? (BUSY or IDLE)
int listsize, qsize; //# of processes in the process list and ready queue
int pid; // current process id,
int nxt_aps; // index (into process_list) of the next arriving process
int nxt_rps; // index (into ready_q) of the next process to use the cpu
//function prototypes
void initialize(float mean_arrival, float mean_burst, int num_process); // set up process list and event times
void remove_from_list(int idx);   // delete entry idx from process_list
void remove_from_readyq(int idx); // delete entry idx from ready_q
int get_next_arriving_process(int n);   // index of smallest AT among first n entries of process_list
int get_nxt_run_process(int algorithm); // index into ready_q chosen by the scheduling algorithm
float expon(float mean); // exponential random variate with the given mean
void printstat(int n);   // report statistics for the first n processes
//function definitions
int main(){
    // Event-driven simulation of a single-CPU scheduler: the next event is
    // either a process arrival or a departure, whichever is scheduled sooner.
    int seed_num = 1;          // seed for the random-number generator
    int num_process = 0;       // number of processes that have finished with the CPU
    int required_process = 5;  // number of processes that will use the CPU
    float mean_arrival = 5.0;  // mean of the exponential arrival times
    float mean_burst = 6.5;    // mean of the exponential CPU burst times
    int algo = 2;              // algorithm to use: 2 = SJF (see get_nxt_run_process)
    int cpid = 0;              // pid of the process currently using the CPU

    srandom(seed_num); //initialize random number generator
    // change to srand() on windows platform and compiler is not gnu cpp
    initialize(mean_arrival, mean_burst, required_process);

    do{
        if ((next_arrival < next_departure) && (listsize > 0)){
            // next event is an arrival
            if (server_status == BUSY){
                // server is busy: append the arriving process to the ready queue
                sim_time = next_arrival;
                pid = process_list[nxt_aps].pid;
                cout << "Time = " << sim_time << ". Process " << pid
                     << " arrived. Server is busy. Enqueueing." << endl;
                ready_q[qsize] = process_list[nxt_aps]; // whole-struct copy
                qsize++;
                remove_from_list(nxt_aps); //remove from process list;
                // schedule next arrival; guard against an empty list so we
                // never read a stale process_list entry
                if (listsize > 0){
                    nxt_aps = get_next_arriving_process(listsize);
                    next_arrival = process_list[nxt_aps].AT;
                }
                else
                    next_arrival = 1.0e+30; // no arrivals left
            }
            else{ // server is IDLE: dispatch the arriving process immediately
                sim_time = next_arrival;
                pid = process_list[nxt_aps].pid;
                cout << "Time = " << sim_time << ". Process " << pid
                     << " arrived. Server is idle." << endl;
                cout << " CPU will run process until time = "
                     << sim_time + process_list[nxt_aps].BT << endl;
                // record statistics (pids are 1-based, the array is 0-based)
                process_stat[pid-1].pid = pid;
                process_stat[pid-1].BT = process_list[nxt_aps].BT;
                process_stat[pid-1].AT = process_list[nxt_aps].AT;
                process_stat[pid-1].priority = process_list[nxt_aps].priority;
                process_stat[pid-1].WT = 0.0;      // no wait: CPU was idle
                process_stat[pid-1].ET = sim_time; // service starts now
                process_stat[pid-1].TT = process_list[nxt_aps].BT + 0.0;
                server_status = BUSY;
                cpid = pid; // pid using the cpu
                next_departure = sim_time + process_list[nxt_aps].BT;
                remove_from_list(nxt_aps); //remove from process list;
                if (listsize > 0){
                    nxt_aps = get_next_arriving_process(listsize);
                    next_arrival = process_list[nxt_aps].AT;
                }
                else
                    next_arrival = 1.0e+30; // no arrivals left
            }
        }
        else{
            // next event is a departure
            if (qsize == 0){
                // ready queue is empty: CPU goes idle until the next arrival
                sim_time = next_departure;
                cout << "Time = " << sim_time << ". Process " << cpid
                     << " is finished with the CPU . Ready queue is empty."
                     << endl;
                server_status = IDLE;
                // sentinel so that next_arrival is always < next_departure
                next_departure = 1.0e+30;
            }
            else{
                // dispatch the next process from the ready queue
                sim_time = next_departure;
                cout << "Time = " << sim_time << ". Process " << cpid
                     << " is finished with the CPU. " << endl;
                cout << " Getting next process from the ready queue."
                     << endl;
                nxt_rps = get_nxt_run_process(algo);
                cpid = ready_q[nxt_rps].pid;
                cout << " Running next process: Process "
                     << cpid << "."<< endl;
                cout << " CPU will run process until time = "
                     << sim_time + ready_q[nxt_rps].BT << endl;
                // record statistics for the dispatched process
                process_stat[cpid-1].pid = ready_q[nxt_rps].pid;
                process_stat[cpid-1].BT = ready_q[nxt_rps].BT;
                process_stat[cpid-1].AT = ready_q[nxt_rps].AT;
                process_stat[cpid-1].priority = ready_q[nxt_rps].priority;
                process_stat[cpid-1].WT = sim_time - ready_q[nxt_rps].AT;
                process_stat[cpid-1].ET = sim_time;
                // BUG FIX: turnaround time must use nxt_rps (the process being
                // dispatched), not nxt_aps (the next *arriving* process).
                process_stat[cpid-1].TT = ready_q[nxt_rps].BT + (sim_time - ready_q[nxt_rps].AT);
                //schedule next departure
                next_departure = sim_time + ready_q[nxt_rps].BT;
                remove_from_readyq(nxt_rps);
            }
            num_process++; // one more process has completed service
        }
    }while (num_process < required_process);

    cout << "Simulation Ends." << endl;
    cout << "*******************************" << endl;
    cout << "Simulation Statistics:" << endl;
    printstat(required_process);
    return 0;
}
void initialize(float mean_arrival, float mean_burst, int num_process){
    // Set up all simulation state: reset the clock, generate the process
    // list, echo it, and schedule the first arrival event.
    int idx;

    sim_time = 0.0; // reset the simulation clock
    listsize = num_process;

    // Populate the process list with random arrival times, burst times and
    // priorities.
    for (idx = 0; idx < num_process; idx++){
        process_list[idx].pid      = idx + 1;
        process_list[idx].AT       = expon(mean_arrival);
        process_list[idx].BT       = expon(mean_burst);
        process_list[idx].priority = random();
    }

    // Echo the generated process list.
    for (idx = 0; idx < num_process; idx++)
        cout << idx+1 << " " << process_list[idx].AT << " "
             << process_list[idx].BT << " "
             << process_list[idx].priority << endl;
    cout << "**********************************" << endl;
    cout << "Simulation starts." << endl;
    cout << "Time = 0.0" << endl;

    // Schedule the first arrival.  No departure is pending yet, so use a
    // huge sentinel time for it.
    nxt_aps = get_next_arriving_process(num_process);
    next_arrival = sim_time + process_list[nxt_aps].AT;
    next_departure = 1.0e+30;
    qsize = 0;
    server_status = IDLE;
}
void remove_from_list(int idx){
    // Delete entry idx from process_list by shifting every later entry one
    // slot toward the front, then shrink the list.
    int j;
    for (j = idx; j < listsize - 1; j++)
        process_list[j] = process_list[j + 1]; // whole-struct copy
    listsize--;
}
void remove_from_readyq(int idx){
    // Delete entry idx from ready_q by shifting every later entry one slot
    // toward the front, then shrink the queue.
    int j;
    for (j = idx; j < qsize - 1; j++)
        ready_q[j] = ready_q[j + 1]; // whole-struct copy
    qsize--;
}
int get_next_arriving_process(int n){
    // Return the index of the entry with the smallest arrival time (AT)
    // among the first n entries of process_list.
    int best = 0;
    int j;
    for (j = 1; j < n; j++){
        if (process_list[j].AT < process_list[best].AT)
            best = j;
    }
    return best;
}
int get_nxt_run_process(int algorithm){
    // Determines the index (into ready_q) of the next process to execute from
    // among the processes in the ready queue.  The algorithm used determines
    // which process is chosen.
    //
    // BUG FIX: min_idx was uninitialized, so every algorithm other than SJF
    // returned an indeterminate index (undefined behavior).  It now defaults
    // to 0 -- i.e. FCFS order, since the ready queue is kept in arrival order.
    int i;
    float min_value;
    int min_idx = 0;
    switch (algorithm){
    case 1: // First Come, First Served: head of the queue (index 0)
        break;
    case 2: // Shortest Job First (non-preemptive): smallest burst time
        min_value = ready_q[0].BT;
        for (i = 1; i < qsize; i++){
            if (ready_q[i].BT < min_value){
                min_value = ready_q[i].BT;
                min_idx = i;
            }
        }
        break;
    case 3: // Priority (non-preemptive): not implemented yet, falls back to FCFS
        break;
    case 4: // Round-robin with quantum Q (non-preemptive): not implemented yet,
            // falls back to FCFS
        break;
    }
    return min_idx;
}
float expon(float mean){
    // Exponential random variate generation by inversion: -mean * ln(U),
    // where U is uniform on (0,1).
    float u, x;
    // generate a U(0,1) random variate
    x = random();
    // change to rand() on windows platform and compiler is not gnu cpp
    //
    // BUG FIX: the original computed u = x / RAND_MAX, so x == 0 gave u == 0
    // and log(0) == -infinity.  Shifting by 0.5 and dividing by RAND_MAX + 1
    // maps x into the open interval (0,1), keeping log(u) finite.
    u = (x + 0.5) / ((float)RAND_MAX + 1.0);
    return (-mean * log(u));
}
void printstat(int n){
    // Report the per-process statistics gathered during the run, followed by
    // the average waiting time and average turnaround time over n processes.
    float sum_WT = 0.0;
    float sum_TT = 0.0;
    int k;
    for (k = 0; k < n; k++){
        cout << "PID = " << process_stat[k].pid << ". ";
        cout << "A.T. = " << process_stat[k].AT << ". ";
        cout << "B.T. = " << process_stat[k].BT << ". ";
        cout << "prio = " << process_stat[k].priority << endl;
        cout << " E.T. = " << process_stat[k].ET << ". ";
        cout << "W.T. = " << process_stat[k].WT << ". ";
        cout << "T.T. = " << process_stat[k].TT << endl;
        sum_WT += process_stat[k].WT;
        sum_TT += process_stat[k].TT;
    }
    cout << "Average process waiting time = " << sum_WT / n << endl;
    cout << "Average process turnaround time = " << sum_TT / n << endl;
}
- Changed type nobugzMVP, Moderator Tuesday, March 24, 2009 1:07 PM homework
General discussion
All replies
- These are the things I want to solved using the code about...
Quote>I am not good in C++.
Then I recommend that you wait until you *have*
achieved a reasonable level of competence before
undertaking sophisticated programming tasks.
Quote>All we need to be solved is the problem above..
Is that all? ;=) Who is "we" ?
Quote>These are the things I want to solved using the code ...
If this is a class assignment, you have a lot of work
ahead of you. Start with the task which seems easiest
for you. If you can't handle *any* of the tasks, consider
dropping the course at this time. (After discussing your
problems with the instructor.) If you get others to do the
thinking and programming for you, you certainly won't deserve
credit for the course.
If this is a work project, consider having it reassigned
to someone with more competence. If need be, hire a
consultant.
These forums are generally for providing assistance with
specific problems, not for doing complete programming
assignments. Code your own solutions to these requirements
and ask here for help with particular issues. Limit your
requests to one problem per post, and you will improve
your chances of getting replies.
Good luck with your project.
- Wayne | https://social.msdn.microsoft.com/Forums/vstudio/en-US/742c6d80-c822-4262-a6c4-b43226979e8c/cpu-scheduling-algorithms-simulation?forum=vcgeneral | CC-MAIN-2015-14 | en | refinedweb |
.
Prepare your application for Nokia E6
Symbian Anna
This article contains tips and tricks to help optimise application UIs for the Nokia E6. Most of the advice given is Nokia's standard guidance for writing scalable UIs, however there are other useful tips related to keyboard support and home screen widgets etc.
Introduction
The Nokia E6 is the first device to feature the Symbian Anna release. The device has a set of features that set it apart from the other Symbian^3 based products, including a touch display with VGA resolution (640x480 px) at high pixel density (326 DPI), a fixed landscape orientation, a monoblock design with keyboard and navikey.
Most applications written using existing Nokia's UI design guidelines related to scalability should display acceptably on the new Nokia E6 without change (for example, the screenshots in this article are from scalable applications that run without modification on the Nokia E6). Applications that have made hard coded assumptions about screen resolution, size and pixel density and key event handling may need significant changes to support scalable UIs and other differences between the Nokia E6 and other Symbian^3 devices.
This article has useful tips for developers working in all development frameworks (e.g. Symbian C++, Java, Qt).
Native applications
Native (C++) applications should work properly on all devices, regardless of their supported screen resolutions and orientations. This requires that applications correctly scale their UI to fill the available space, along with any associated fonts and graphical resources, and that touchable UI elements aren't scaled below the point where they can no longer be used. In addition, the application must respond to orientation events unless there is a good reason to lock them to a specific orientation.
Symbian C++ developers can use the Scalable UI framework to create applications that work properly on devices with different screen resolutions and at different orientations (see Symbian Scalable UI Framework for an overview of the related concepts).
Qt developers benefit from layout managers that can dynamically position and resize QWidget and QGraphicsView based components to fill the available space.
Use all of the available screen real estate
Symbian applications almost invariably occupy the whole screen. The application may take control of the whole of the screen to draw its UI, or it may allow the application framework to manage the standard screen furniture (status panes and softkey areas) and draw its UI within the remaining client rectangle.
Whatever approach is used, it is important that applications make no assumptions about the available client or screen area, other than of course determining the minimum area needed for the application so that all needed UI elements can fit into the visible area. Failing to allow for screen size variation will result in UI elements placed at the wrong locations (either partially off screen or grouped together in a corner leaving significant on-screen real estate unused).
Recommendations:
- When developing Qt applications, set the MainWindow (or main widget) to show maximized and then use layout managers within this window to position, align and resize the UI elements. If using custom code for layout, do not hardcode assumptions about the screen size.
- Qt Quick has a built-in anchoring system as well, which provides the mechanism for controlling the positioning and size of the QML components. For more information about managing different screen form factors and resolutions, please see the Qt 4.7 Scalability (snapshot) document.
- When developing Symbian C++ applications:
- where possible use predefined templates. Avkon UI components including forms, list, toolbars, navigation tabs etc. scale automatically and will be positioned correctly whether the app is run on Nokia 5500’s tiny screen or on a device with VGA resolution like the Nokia E6.
- when using custom UI elements always ensure that they are positioned and sized relative to each device’s screen size and client rectangle geometry.
Noughts and Crosses - designed to work on Symbian/S60 3.x and later devices, of any resolution and form factor, touch, non-touch or hybrid. Works perfectly on Nokia E6 as well.
Settings Screen - designed to use stock Avkon controls, which are guaranteed to scale automatically to all resolutions, following the predefined platform layout.
Scale graphic resources
Graphic resources associated with resizable UI components may also need to be scaled. For best results, use scalable vector graphics (SVG) whenever possible, even if the API can also accept bitmap format.
When providing a background image to your UI elements (views, lists, etc.) you should ensure that the respective resource is either large enough to cover all possible resolutions or that you scale the available resource accordingly. The best approach is for the application to use both solutions, and have a high resolution image that can be used at native resolution or can be scaled-up when needed. SVG resources could be considered for this use case as well.
Finger-touchable UI elements
Nokia’s devices are expected to be finger touch friendly, which simply means that UI elements must be big enough for the user to be able to tap their active areas. The Symbian UI style guide indicates that active elements for touch functions should be at least 7x7 mm in size, with at least 1 mm spacing in between them. See Scale and positioning of controls in the Design and User Experience Library (v2.2)
Note that not all devices have the same display parameters. 7x7mm translates in different sizes in pixels, depending on screen’s DPI parameter. Applications should take this factor under consideration as well, and dynamically calculate the minimal UI sizes of the finger touchable UI elements. The components can then be allowed to scale up from the given minimal value for each device, but not below it.
Orientation changes
Most Nokia devices will change display logical orientation when the device is tilted, adapting to device’s current position. Applications are expected to handle the orientation changed notification and adapt their application UI to the new client area.
Note however that not all devices have orientation change enabled and that, for certain form factors, the screen orientation is not always in sync with the device's orientation. For example, on Nokia N8 the screen has the same orientation as the device while on the Nokia E6 the screen is in landscape mode when the device is in portrait mode and the orientation does not change when the phone is tilted.
Applications should avoid hard coding the phone’s orientation (e.g. always switch to landscape), leaving it a decision to be made based on reading the phone’s actual parameters.
Font size
The DPI parameter of the screen can also have a significant impact on the size of the fonts used by the application. At higher DPI the font would appear to be smaller, therefore the application must also dynamically manage the font size.
The recommended way is to use the logical fonts provided through the UI Framework Utilities API (AknLayoutUtils ...)
With Qt/QML, the Qt Quick Components offer the Style QML element which provides device specific constants similar to Avkon's logical fonts, which allow applications to use fonts of predefined sizes defined to match the DPI and overall look and feel of the targeted device.
import com.nokia.symbian 1.0
...
Text
{
font.pixelSize: platformStyle.fontSizeSmall
text: "Small font on all Symbian devices although the actual pixel size varies function of screen's DPI"
color: platformStyle.colorNormalLight
}
Keyboard handling
Symbian touch devices may also have a physical keyboard, and this may only be active in certain operation modes (foldable keyboard can be exposed or hidden) or may be active all the time (monoblock design with touch screen).
Applications should be designed to allow both touch and keyboard operation. For example when a list of options is presented to the user he/she may choose to make the selection by tapping on the screen or by using a navikey or directional keys from the keyboard.
In particular for Nokia E6, the device does not have a dedicated camera key. Applications should therefore be ready to handle EKeyOk as a camera trigger button.
The virtual keyboard in split view
With the new Symbian Anna release more of the in-built applications are taking advantage of the split view input and end-users will be expecting to see the same in 3rd party applications. While not particularly useful for the Nokia E6, which has an always-on physical qwerty keyboard, adding the new functionality is rather easy (once you have taken care of the above discussed scalability issues described above). For an example of how to implement the split view input, see Split view input in Symbian C++ applications.
Home Screen widgets
Symbian^3 does not provide a public API for publishing content to home screen widgets. However, the Nokia N97 device specific API Home Screen Publishing API can also be used on the Nokia E6.
Note however that the higher pixel density on the Nokia E6 means that their size is increased from 312x82 to 436x115 pixels. While content is automatically scaled and existing widgets will continue to work, images may become pixelated. Using larger/scalable graphic resources, matching the new VGA CSS, will produce better results. The image below shows two unmodified widgets running on VGA layout: the Qt Homescreen (using the threerows template) and Gravity (with a threerowtext template).
The CSS of the published templates has been updated as well, see Home Screen widget template CSS for VGA devices
If you are developing a widget with a custom template, using Symbian's platform XML API, you must create a new variant for VGA layout, with the manifest defining the family as vga_tch.
Java applications
The considerations presented above regarding application scalability remain valid. They are reflected in Java Developer Library's Scalability chapter.
Do not use Nokia-MIDlet-App-Orientation
Starting from S60 5th Edition, Java applications can force UI to either portrait or landscape orientation by using JAD attribute Nokia-MIDlet-App-Orientation.
This JAD attribute should not be used with devices with VGA display resolution because:
- Since VGA display resolution (640 x 480) stands for having landscape orientation as the default and only orientation for application UI, setting landscape orientation mode by using the JAD attribute is not required.
- Setting portrait orientation mode by using the JAD attribute can result in unexpected and undesired outcome on placing the layout of application UI on the display.
Scale to VGA resolution
In Symbian starting from S60 3rd Edition, Nokia-MIDlet-Original-Display-Size and Nokia-MIDlet-Target-Display-Size attributes can be used for graphics scaling of full screen Canvases.
In case a Java applications which targets a VGA device and which has been originally set to use these JAD attributes for some other resolution than VGA, redefining the values of the attributes needs to be done in order to optimize the application specifically for VGA display resolution.
The JAD attributes can be used for scaling the Java application for VGA resolution as follows:
- For Java applications which are designed and targeted only for devices with VGA resolution, it is sufficient in most cases to define only Nokia-MIDlet-Original-Display-Size attribute which will enable the application to use full screen automatically while maintaining the original aspect ratio (although that might leave black edges on the display area).
- The attribute Nokia-MIDlet-Target-Display-Size should only be used in special cases, for example when application resolution needs to be limited to a certain size or aspect ratio. If the attribute is not set, the MIDlet is scaled without changing aspect ratio. For VGA resolution, application will be scaled to fill the whole screen but the aspect ratio changes and thus the result can be visibly different (which, depending on the use case, can have impact from user experience point of view).
More information
More about graphics scaling and related JAD attributes can be found from Nokia Developer Java Developer's Library:
Web and Web Runtime applications
In addition to the considerations imposed by the new screen resolution, Web and Web Runtime developers will also benefit from an updated browser and improved widget security prompts user experience. For details, please see Symbian Browser and Web Runtime 7.3 for Developers
Ovi App Wizard
Applications generated using the Ovi App Wizard tool will be automatically scaled to VGA resolution.
Testing your application
To test your application on a Nokia E6 device you can use the free Remote Device Access service, provided by Nokia Developer. The current releases of the Qt SDK 1.1 and Symbian^3 SDK 1.0 have simulation/emulation support for VGA screen resolution as well.
What about with QML, where usually everything is hardcoded
What about on Qt?
What does this mean? Any app, or any camera app? Are you saying that any app that doesn't use EKeyOK should prepare to launch the camera?
Basically this is Symbian C++ specific, and it needs to explain how you can turn off split view if it makes no sense in a device.
Not sure that makes sense. Do you mean "camera applications should be ready ..."??
Any advice here for Qt? Do the standard components do this for you?
Very Symbian C++ centric. What is the story for Qt and logical fonts? This is a simple approach, but we should also point to code for scaling on device independent font metrics - twips.
Broken link for "Qt Homescreen"
Perhaps we should also link to some common glossary terms like "navikey" | http://developer.nokia.com/community/wiki/Prepare_your_application_for_Nokia_E6 | CC-MAIN-2015-14 | en | refinedweb |
saving xml to a String variable
saving xml to a String variable
Hello guys,
I'm writing my first project in Ext GWT and I've got the following
problem:
I want to load XML file and save it to a local String variable.
Code:
public class XmlFile { String temp; public void greetServer() { RequestBuilder requestBuilder = new RequestBuilder (RequestBuilder.GET, "Test.gwt.xml"); try { requestBuilder.sendRequest(null,new RequestCallback() { @Override public void onResponseReceived(Request request, Response response) { if (response.getStatusCode()==200) { temp=response.getText(); System.out.println(temp); //HERE OK } } @Override public void onError(Request request, Throwable exception) { Window.alert(exception.getMessage().toString()); } }); } catch (Exception ex) { Window.alert(ex.getMessage().toString()); } } System.out.println(temp); //HERE ALWAYS NULL }
Then I want to do some other things with it in other classes. If I do it this way my
"temp" varaible is always null outside "onResponseReceived" function.
I've searched the Internet and found that it's because requestbuilder
is asynchronous and I should "make callback of this function". How can
I do that?
The request is async. Make sure you only access "temp" after onResponseReceived was called.
Could I do this that someone would be able to use my class like that?:
XMLFIle file;
file.greetServer();
System.out.println(file.temp);
Sven has already replied on your question, the request is asynchronous and you can access the result only after the callback function was called.
It seems hard at first, but with time you get used to working asynchronously.
Events is something you should work with.
Regards,
Michel. | http://www.sencha.com/forum/showthread.php?83167-saving-xml-to-a-String-variable | CC-MAIN-2015-14 | en | refinedweb |
/* ** (c) COPYRIGHT MIT 1995. ** Please first read the full copyright statement in the file COPYRIGH. */
This module provides some "make life easier" functions in order to get the application going. The functionality of this module was originally in HTAccess, but now It is part of the application interface which the application may use it if desired.
This module is implemented by HTHome.c, and it is a part of the W3C Sample Code Library.
#ifndef HTDIALOG_H #define HTDIALOG_H #include "WWWLib.h"
The following functions provide a default set of error messages and prompts in plain English. You can of course change this as you like.
This list corresponds to the enumeration list defined in the HTAlert module
#define HT_MSG_ENGLISH_INITIALIZER \ "Please enter username:", \ "Please enter username for proxy authentication:", \ "Please enter username for this FTP server:", \ "Password:", \ "Please give name of file to save in:", \ "Plase enter account:", \ "You might not be allowed to use this method here, continue?", \ "Location has moved, continue?", \ "A new set of rules is requested to be added to your setup - continue?", \ "This file already exists - replace existing file?", \ "Authentication failed - retry?", \ "Proxy authentication failed - retry?", \ "This method has already been performed - repeat operation?", \ "This document is very big - continue operation?", \ "The source document for this operation has moved - continue operation \ with new location?", \ "The destination document for this operation has moved - continue \ operation with new location?", \ "A redirection may change the behavior of this method - proceed anyway?", \ "An automatic request for changing proxy has been encountered - continue?", \ "The persistent cache is already in use by another user. If this is not \ the case then you can manually delete this lock and restart.", \ "The server has sent you a cookie - accept?"
This list corresponds to the enumeration list defined in the HTError module
/* CODE ERROR MESSAGE ERROR URL */ #define HTERR_ENGLISH_INITIALIZER \ { 100, "Continue", "information" }, \ { 101, "Switching Protocols", "information" }, \ { 200, "OK", "success" }, \ { 201, "Created", "success" }, \ { 202, "Accepted", "success" }, \ { 203, "Non-authoritative Information", "success" }, \ { 204, "Document Updated", "success" }, \ { 205, "Reset Content", "success" }, \ { 206, "Partial Content", "success" }, \ { 207, "Partial Update OK", "success" }, \ { 300, "Multiple Choices", "redirection" }, \ { 301, "Moved Permanently", "redirection" }, \ { 302, "Found", "redirection" }, \ { 303, "See Other", "redirection" }, \ { 304, "Not Modified", "redirection" }, \ { 305, "Use Proxy", "redirection" }, \ { 306, "Proxy Redirect", " }, \ { 416, "Range Not Satisfiable", "client_error" }, \ { 417, "Expectation Failed", "client_error" }, \ { 418, "Reauthentication Required", "client_error" }, \ { 419, "Proxy Reauthentication Reuired", "client_error" }, \ { 500, "Internal Server Error", "server_error" }, \ { 501, "Not Implemented", "server_error" }, \ { 502, "Bad Gateway", "server_error" }, \ { 503, "Service Unavailable", "server_error" }, \ { 504, "Gateway Timeout", "server_error" }, \ { 505, "HTTP Version not supported", "server_error" }, \ { 506, "Partial update Not Implemented", "server_error" }, \ \ /* Cache Warnings */ \ { 10, "Response is Stale", "cache" }, \ { 11, "Revalidation Failed", "cache" }, \ { 12, "Disconnected Opeartion", "cache" }, \ { 13, "Heuristic Expiration", "cache" }, \ { 14, "Transformation Applied", "cache" }, \ { 99, "Cache warning", "cache" }, \ \ /* Non-HTTP Error codes and warnings */ \ { 0, "Can't locate remote host", "internal" }, \ { 0, "No host name found", "internal" }, \ { 0, "No file name found or file not accessible", "internal" }, \ { 0, "FTP server replies", "internal" }, \ { 0, "FTP server doesn't reply", "internal" }, \ { 0, "FTP login failure", "internal" }, \ { 0, "Server timed out", "internal" }, \ { 0, "Gopher-server 
replies", "internal" }, \ { 0, "Data transfer interrupted", "internal" }, \ { 0, "Connection establishment interrupted", "internal" }, \ { 0, "CSO-server replies", "internal" }, \ { 0, "This is probably a HTTP server 0.9 or less","internal" }, \ { 0, "Bad, Incomplete, or Unknown Response", "internal" }, \ { 0, "Unknown access authentication scheme", "internal" }, \ { 0, "News-server replies", "internal" }, \ { 0, "Trying `ftp://' instead of `file://'", "internal" }, \ { 0, "Too many redirections", "internal" }, \ { 0, "Method not suited for automatic redirection","internal" }, \ { 0, "Premature End Of File", "internal" }, \ { 0, "Response from WAIS Server too Large - Extra lines \ ignored","internal"}, \ { 0, "WAIS-server doesn't return any data", "internal" }, \ { 0, "Can't connect to WAIS-server", "internal" }, \ { 0, "operation failed", "internal" }, \ { 0, "Wrong or unknown access scheme", "internal" }, \ { 0, "Access scheme not allowed in this context", "internal" }, \ { 0, "When you are connected, you can log in", "internal" }, \ { 0, "This version has expired and will be automatically reloaded", "internal" }, \ { 0, "Loading new rules must be explicitly acknowledged", "internal" }, \ { 0, "Automatic proxy redirection must be explicitly acknowledged", "internal" }
This function provides an unformatted English string containing the possibly nested status message that explains the result of a request. This is essentially "flattening out" the information provided in the request error strack. The string must be freed by the caller.
extern char * HTDialog_errorMessage (HTRequest * request, HTAlertOpcode op, int msgnum, const char * dfault, void * input);
This function provides a string containing an English progress message that the application can present to the user if desired. The string must be freed by the caller.
extern char * HTDialog_progressMessage (HTRequest * request, HTAlertOpcode op, int msgnum, const char * dfault, void * input);
You can register a set of callback functions to handle user prompting, error messages, confirmations etc. Here we give a set of functions that can be used on almost any thinkable platform. If you want to provide your own platform dependent implementation then fine :-)
This function prompts the user for a confirmation on the message passed as a parameter. If the user reacts in the affirmative, returns TRUE, returns FALSE otherwise.
extern HTAlertCallback HTConfirm;
Prompt for answer and get text back. Reply text is either NULL on error or a dynamic string which the caller must free.
extern HTAlertCallback HTPrompt;
Prompt for password without echoing the reply. Reply text is either NULL on error or a dynamic string which the caller must free.
NOTE: The current version uses getpass which on many systems returns a string of 8 or 16 bytes.
extern HTAlertCallback HTPromptPassword;
This is just a composite function using HTPrompt and HTPromptPassword. The strings returned must be freed by caller.
extern HTAlertCallback HTPromptUsernameAndPassword;
This function simply puts out the message passed.
extern HTAlertCallback HTAlert;
This function can be used to indicate the current status of a certain action. In order to avoid having strings directly in the core parts of the Library, this function is passed a "state" argument from which the message can be generated in this module. The "param" argument is for additional information to be passed.
extern HTAlertCallback HTProgress;
This function outputs the content of the error list to standard output (used in Line Mode Browser), but smart clients and servers might overwrite this function so that the error messages can be handled to the user in a nice(r) way. That is the reason for putting the actual implementation in HTAlert.c.
extern HTAlertCallback HTError_print;
Default function that creates an error message using HTAlert() to put out the contents of the error_stack messages. Furthermore, the error_info structure contains a name of a help file that might be put up as a link. This file can then be multi-linguistic.
extern BOOL HTError_response (HTRequest * request, HTAlertOpcode op, int msgnum, const char * dfault, void * input, HTAlertPar * reply);
#endif /* HTDIALOG_H */ | http://www.w3.org/Library/src/HTDialog.html | CC-MAIN-2015-14 | en | refinedweb |
How to modify the string that I pass to the function that I get it back as modified.
I wanna get back just "world!" but without returning anything.
#include <iostream>
#include <cstring>

using namespace std;

// FIX: the original f2() only advanced its *local copy* of the pointer
// (buf2 += 6) before printing, so main()'s buffer was never changed and
// still printed "Hello world!".  To hand "world!" back to the caller
// without returning anything, shift the tail of the string to the front of
// the caller's buffer in place.
void f2(char * buf2)
{
    // +1 also copies the terminating '\0'; memmove is safe for overlapping
    // source/destination ranges.
    memmove(buf2, buf2 + 6, strlen(buf2 + 6) + 1);
    cout << buf2 << endl;
}

int main()
{
    char buf[88] = "Hello world!";
    f2(buf);
    cout << buf << endl;   // now prints "world!" as well
    return 0;
}
21 May 2012 03:00 [Source: ICIS news]
DOHA (ICIS)--An Indian delegation attending International Fertilizer Industry Association (IFA) conference has withdrawn from the annual event amid controversy over phosphoric acid and diammonium phosphate (DAP) prices, sources said over the weekend.
Talks on second quarter phosphoric acid contracts have stalled through most of April and May, as Indian buyers negotiate a $45/tonne (€35/tonne) discount over first quarter prices in a firming phosphates market.
There were expectations Q2 contracts will be agreed at IFA, but Moroccan fertilizer producer Office Cherifien des Phosphates (OCP) confirmed negotiations with Indian joint venture partners Paradeep Phosphates Limited (PPL), Zuari, Tata Chemicals Limited (TCL) were off.
OCP, which supplies almost half India’s annual phosphoric acid requirement and settled at $905/tonne CFR (cost and freight) for Q1 contracts, is holding out for a similar Q2 price that equates to a DAP parity price of $550/tonne CFR.
The Fertilizer Association of India claimed suppliers were engaged in unfair practices after US and
The move to withdraw from the global fertilizer industry’s annual conference was taken to underline Indian importers’ stance against offers of phosphoric acid and DAP.
Failure to agree a price, settled at $905/tonne CFR for Q1 contracts has delayed settlement on DAP contracts between the
Domestic DAP production has slowed due to the lack of imported phosphoric acid.
India, which imports almost 50% of globally traded DAP, enjoys a discount on contract prices, but the spot market is firming and suppliers view current buying ideas at $520/tonne CFR as out of kilter with the international market.
Almost 7m tonnes of DAP were imported during 2012 and a spot cargo of Saudi DAP was sold $560/tonne CFR to Indian buyer Zuari last week, marking a $10/tonne increase over last business.
Benchmark US Gulf export prices have gained traction after a $80/tonne slump at the end of 2011 and DAP prices are set to move up off the back of tightening supply after healthy offtake for the domestic spring application.
North African export prices are likely to increase, given DAP production at the Groupe Chimique Tunisien plant in Gabes has largely halted due to continue civil unrest, tightening supply from the region.
The impasse is expected to be resolved early June.
High levels of inventory will cover immediate requirements for the July kharif planting season (summer or monsoon season), but DAP and phosphoric imports will be needed to replenish depleted stocks.
The IFA conference runs from 20-23 May.
For more information on phosphates and other fertilizers, please visit icis pricing fertilizers
Follow Karen Thomas on | http://www.icis.com/Articles/2012/05/21/9561445/ifa-12-indian-delegation-withdraws-in-phosphate-price.html | CC-MAIN-2015-14 | en | refinedweb |
On Fri, 30 Nov 2001 09:01, Richard Emberson wrote:
> The major part it to allow tasks to take over the parsing of the build.xml
> document possibly
> by allowing them to load their own DocumentHandlers. This requires the
> changes I
> outlined and could be done before namespace support (making sure there is
> no name collisions).
I am not sure we want to do this in Ant1 as it highly couples ant tasks to
XML libraries. We have discussed this in the past - and in fact a year and a
half ago I was pushing for much the same thing. In ant2 we will hopefully
allow tasks to get access to a representation of themselves (ie a TaskModel).
--> | http://mail-archives.apache.org/mod_mbox/ant-dev/200111.mbox/%3C200111301701.fAUH13u15254@mail012.syd.optusnet.com.au%3E | CC-MAIN-2015-14 | en | refinedweb |
# set some nicer defaults for matplotlib
from matplotlib import rcParams

# these colors come from colorbrewer2.org. Each is an RGB triplet
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
                (0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
                (0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
                (0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
                (0.4, 0.6509803921568628, 0.11764705882352941),
                (0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
                (0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
                (0.4, 0.4, 0.4)]

rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
# NOTE(review): the scraped source was truncated here ("rcParams['axes.grid'] =:").
# False matches the upstream CS109 notebook this cell comes from -- verify.
rcParams['axes.grid'] = False
Your Answer Here()
Briefly summarize these graphs -- how accurate is the typical poll a day before the election? How often does a prediction one month before the election mispredict the actual winner?
Your summary here
You are (finally!) in a position to do some quantitative analysis.
We have provided an
error_data function that builds upon the functions you have written. It computes a new DataFrame with information about polling errors.
Use
error_data,
find_governer_races, and
pd.concat to construct a Data Frame summarizing the forecast errors
from all the Governor races
Hint
It's best to set
ignore_index=True in
pd.concat
def party_from_color(color):
    """Map an RCP chart line color (hex string) to a party label."""
    if color in ['#0000CC', '#3B5998']:
        return 'democrat'
    if color in ['#FF0000', '#D30015']:
        return 'republican'
    return 'other'


def error_data(url):
    """
    Given a Governor race URL, download the poll data and race result,
    and construct a DataFrame with the following columns:

    candidate: Name of the candidate
    forecast_length: Number of days before the election
    percentage: The percent of poll votes a candidate has.
                Normalized so that the candidate percentages add to 100%
    error: Difference between percentage and actual race result
    party: Political party of the candidate

    The data are resampled as necessary, to provide one data point per day.

    Relies on helpers defined elsewhere in this notebook:
    id_from_url, get_poll_xml, plot_colors, rcp_poll_data, race_result,
    _strip.
    """
    # renamed from `id` to avoid shadowing the builtin
    poll_id = id_from_url(url)
    xml = get_poll_xml(poll_id)
    colors = plot_colors(xml)
    if len(colors) == 0:
        return pd.DataFrame()

    df = rcp_poll_data(xml)
    result = race_result(url)

    # remove non-letter characters from columns
    df = df.rename(columns={c: _strip(c) for c in df.columns})
    for k, v in result.items():
        result[_strip(k)] = v

    # FIX: the original used `c is not 'date'`, an identity comparison that
    # only works by accident of string interning; use inequality instead.
    candidates = [c for c in df.columns if c != 'date']

    # turn into a timeseries...
    df.index = df.date

    # ...so that we can resample at regular, daily intervals
    df = df.resample('D')
    df = df.dropna()

    # compute forecast length in days
    # (assuming that last forecast happens on the day of the election, for simplicity)
    forecast_length = (df.date.max() - df.date).values
    forecast_length = forecast_length / np.timedelta64(1, 'D')  # convert to number of days

    # compute forecast error
    errors = {}
    normalized = {}
    poll_lead = {}  # unused in the original notebook; kept for fidelity

    for c in candidates:
        # turn raw percentage into percentage of poll votes
        corr = df[c].values / df[candidates].sum(axis=1).values * 100.
        err = corr - result[_strip(c)]

        normalized[c] = corr
        errors[c] = err

    n = forecast_length.size

    # assemble the output frame: one row per (candidate, day)
    result = {}
    result['percentage'] = np.hstack(normalized[c] for c in candidates)
    result['error'] = np.hstack(errors[c] for c in candidates)
    result['candidate'] = np.hstack(np.repeat(c, n) for c in candidates)
    result['party'] = np.hstack(np.repeat(party_from_color(colors[_strip(c)]), n)
                                for c in candidates)
    result['forecast_length'] = np.hstack(forecast_length for _ in candidates)

    result = pd.DataFrame(result)
    return result
""" function --------- all_error_data Calls error_data on all races from find_governer_races(page), and concatenates into a single DataFrame Parameters ----------- None Examples -------- df = all_error_data() """ #your code here
# Build the combined polling-error table for every Governor race
# (slow: one network fetch per race).
errors = all_error_data()
Here's a histogram of the error of every polling measurement in the data
# Distribution of all individual polling errors (in percentage points).
errors.error.hist(bins=50)
plt.xlabel("Polling Error")
plt.ylabel('N')
#your code here
#your code here
Bootstrap resampling is a general purpose way to use empirical data like the
errors DataFrame to estimate uncertainties. For example, consider the Viriginia Governor Race. If we wanted to estimate how likey it is that McAuliffe will win given the current RCP data, the approch would be:
errors.error. We are assuming that these numbers represent a reasonable error distribution for the current poll data.
Bootstrapping isn't foolproof: it makes the assumption that the previous Governor race errors are representative of the Virginia race, and it does a bad job at estimating very rare events (with only ~30 races in the errors DataFrame, it would be hard to accurately predict probabilities for 1-in-a-million scenarios). Nevertheless, it's a versatile technique.
Use bootstrap resampling to estimate how likely it is that each candidate could win the following races.
Summarize your results in a paragraph. What conclusions do you draw from the bootstrap analysis, and what assumptions did you make in reaching this conclusion. What are some limitations of this analysis?
#your code here
Your summary here
For comparison, most of the predictions in Nate Silver's presidental forecast had confidences of >95%. This is more precise than what we can estimate from the RCP poll alone. His approach, however, is the same basic idea (albeit he used many more polls, and carefully calibrated each based on demographic and other information). Homework 2 will dive into some of his techniques further.
To submit your homework, create a folder named lastname_firstinitial_hw0 and place this notebook file in the folder. If your notebook requires any additional data files to run (it shouldn't), add them to this directory as well. Compress the folder (please use .zip compression) and submit to the CS109 dropbox in the appropriate folder. If we cannot access your work because these directions are not followed correctly, we will not grade your work.
css tweaks in this cell | http://nbviewer.ipython.org/github/cs109/content/blob/master/HW1.ipynb | CC-MAIN-2015-14 | en | refinedweb |
So I have been trying to get OpenCV to work. I displayed some pictures in windows and simple stuff like that. Then, following along in my book, I attempted to write a program which would take video from a webcam and place it in a video. Here is my code:
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
main( int argc, char** argv ){
cvNamedWindow( "Webcam", CV_WINDOW_AUTOSIZE );
CvCapture* capture;
This is a blog to help David Chetrit figure out the parts he will need to build his robot.
Ultrasonic sensor: -$2.99
MCU:
So I am going to add a computer to my robot project so that I will be able to use OpenCV and Sphinx. I will be using this motherboard:. This processor:.
Ok!
So in another forum I explained how I was using power screwdriver motors to move my rocker bogie suspension system. So I was wondering if you guys had any ideas on how to dampen the acoustic noise(not the electrical noise) coming from these motors. They use planetary gearboxes which I know are notorious for being loud but I need the torque that they provide. I was planning on using some sound proofing material to dampen the sound but I was wondering if you had any ideas. I know it is kind of a weird topic but thanks for your help.
I just wanted to start a discussion about how you find time to build robots. I know that I love building robots but sometimes I can not find any time to do this hobby..
So a friend recently asked me if it would be possible to replace the USB jack on a set of headphones with a Mic jack so it could be plugged into and ipod. I said I wasn't sure. I know it isn't really robot related but it was an electronics question and I figured this was a good place to ask it. Thanks | http://letsmakerobots.com/user/14431/pages | CC-MAIN-2015-14 | en | refinedweb |
Joe Orton wrote:
>.
>
>
>.
> some technical issues:
> - sha code should use apr_uint64_t etc types
OK.
> - code style!?
So where are the emacs macros for code style?
> - the odd C++ // comment
Deliberate - I knew I'd get nagged about them so a good way of not
dropping the ball :-)
> - there's already a SHA implementation in apr-util, is
> there duplication here which could be reduced?
Definitely - I was not aware of that, I could look at extending that
instead.
> - the SHA* functions need to be namespaced if they are not static
I was planning to make them static in the long run.
Cheers,
Ben.
--
"There is no limit to what a man can do or how far he can go if he
doesn't mind who gets the credit." - Robert Woodruff | http://mail-archives.apache.org/mod_mbox/apr-dev/200311.mbox/%3C3FA4931D.6060903@algroup.co.uk%3E | CC-MAIN-2015-14 | en | refinedweb |
ftw.activity 1.1.4
An activity feed for Plone.
ftw.activity
ftw.activity provides a view with an activity stream for Plone.
How it works
The feed is based on a simple catalog query restricted to the current context, ordered by modified date so that the newest modifications are on top.
Limitations:
- The ordering by modified date is not exactly accurate since the precision of the catalog index is only down to the minute.
- Since it is based on a catalog query each object only appears once even when it is edited multiple times in a row.
- Only existing objects are listed, so deleting objects will not appear in the feed at all.
- Only actions which change the modification date are covered.
- We do not register any links or actions, so that you can integrate it into Plone as you like. See the usage sections.
Usage
- Add ftw.activity as dependency to your package (setup.py) or to your buildout configuration:
[instance] eggs += ftw.activity
- Install the generic import profile in Plone’s addons control panel.
Once the package is installed there is no link to the view. The view is available as /activity on any context, so you might want to place a link anywhere you like or add an action.
For collections, ftw.activity registers an Activity view which can be selected as default view. Just make sure to sort the collection by modification date and reverse it for best experience.
ftw.activity also registers an ftw.tabbedview tab with the name tabbedview_view-activity.
Custom event representations
By default the each event is represented by some metadata (e.g. author with portrait, action, etc) and the title of the modified object.
If you’d like to display more information you can do this by registering a custom representation adapter in your custom code.
Register the adapter in your ZCML:
<adapter factory=".activity.IssueResponseRepresentation" for="..interfaces.IIssueResponse *" provides="ftw.activity.interfaces.IActivityRepresentation" />
create the adapter class (example ./activity.py):
from ftw.activity.browser.representations import DefaultRepresentation
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile


class IssueResponseRepresentation(DefaultRepresentation):
    # Override the page template used to render this event in the activity
    # feed; the rest of the rendering machinery is inherited from
    # DefaultRepresentation.
    index = ViewPageTemplateFile('templates/issue_representation.pt')

    # helper methods when needed
and a template (example ./templates/issue_representation.pt):
<metal:wrapper <metal:CONTENT <div class="issue-text" tal: </metal:CONTENT> </metal:wrapper>
take a look at the activity_macros for details on what slots you can fill.
Links
Changelog
1.1.4 (2015-03-25)
- Fix width of too wide images. [Kevin Bieri]
1.1.3 (2014-11-18)
- Fixes a bug where the activity view crashed when the modifying user is no longer available. [mbaechtold]
1.1.2 (2014-09-24)
- Ignore comments in activity view. Fixes a bug where the activity view crashed when comments were added. [jone]
1.1.1 (2014-09-24)
- Empty brown-bag release.
1.1.0 (2014-09-04)
- Add support for collections. [jone]
1.0.0 (2014-09-03)
- Initial implementation.
- Downloads (All Versions):
- 100 downloads in the last day
- 310 downloads in the last week
- 593 downloads in the last month
- Author: 4teamwork AG
- Keywords: ftw activity feed
- License: GPL2
- Categories
- Package Index Owner: jone, maethu, 4teamwork, buchi
- DOAP record: ftw.activity-1.1.4.xml | https://pypi.python.org/pypi/ftw.activity | CC-MAIN-2015-14 | en | refinedweb |
Understanding data access
This topic has been updated for the Orchard 1.9 release.
Data access in an Orchard project is different than data access in a traditional web application, because the data model is built through code rather than through a database management system. You define your data properties in code and the Orchard framework builds the database components to persist the data. If you need to change the data structure, you write code that specifies the changes, and those changes are then propagated by that code to the database system. This code-centric model includes layers of abstraction that permit you to reuse components in different content types and to add or change behaviors without breaking other layers.
The key concepts of data access are the following:
- Records
- Data migrations
- Content handlers
- Content drivers
Records
A record is a class that represents the database schema for a content part. To create a record, you define a class that derives from
ContentPartRecord and add the properties that you need in order to store data for the content part. Each property must be virtual. For example, a
Map part might include the following record:
namespace Map.Models
{
    // Record class for the Map content part: models the database schema
    // that Orchard builds and persists for the part.  Each property must be
    // virtual so the data layer can proxy it (per the surrounding docs).
    public class MapRecord : ContentPartRecord
    {
        public virtual double Latitude { get; set; }
        public virtual double Longitude { get; set; }
    }
}
Typically, the record class resides in a folder named Models. The parent class,
ContentPartRecord, also includes a property named
id and a reference to the content item object. Therefore, an instance of the
MapRecord class includes not just
Latitude and
Longitude but also the
id property and the content item object that is used to maintain the relationships between the part and other content.
When you define a content part, you use the record as shown below:
// FIX: the original declared this part in "Maps.Models" while the companion
// MapRecord and MapHandler examples use "Map.Models"/"Map.Handlers"; the
// namespace is aligned to Map.Models for consistency.
namespace Map.Models
{
    // Content part exposing the Map data.  Only part-relevant data is
    // declared here; the id and content-item plumbing comes from the
    // ContentPartRecord base of MapRecord.
    public class MapPart : ContentPart<MapRecord>
    {
        [Required]
        public double Latitude
        {
            get { return Retrieve(r => r.Latitude); }
            set { Store(r => r.Latitude, value); }
        }

        [Required]
        public double Longitude
        {
            get { return Retrieve(r => r.Longitude); }
            set { Store(r => r.Longitude, value); }
        }
    }
}
Notice that only data that's relevant to the part is defined in the
MapPart class. You do not define any properties that are needed to maintain the data relationships between
MapPart and other content.
For a complete example of the MapPart, see Writing a Content Part.
Data Migrations
Creating the record class does not create the database table; it only creates a model of the schema. To create the database table, you must write a data migration class.
A data migration class enables you to create and update the schema for a database table. The code in a migration class is executed when an administrator chooses to enable or update the part. The update methods provide a history of changes to the database schema. When an update is available, the site administrator can choose to run the update.
You can create a data migration class by running the following command from the Orchard command line:
codegen datamigration <feature_name>
This command creates a Migrations.cs file in the root of the feature. A
Create method is automatically created in the migration class.
In the
Create method, you use the
SchemaBuilder class to create the database table, as shown below for the
MapPart feature.
In the
Uninstall method you can implement destructive operations that will be executed when the module is uninstalled. Keep in mind that when a module is re-added and enabled after it was uninstalled it will be installed again, thus the
Create method of migrations will also run. void Uninstall() { // Dropping tables can potentially cause data loss for users so be sure to warn them in your module's documentation about the implications. SchemaBuilder.DropTable("MapRecord"); ContentDefinitionManager.DeletePartDefinition(typeof(MapPart).Name); } } }
By including
.ContentPartRecord() with your properties in the definition of the database schema, you ensure that other essential fields are included in the table. In this case, an
id field is included with
Latitude and
Longitude.
The return value is important, because it specifies the version number for the feature. You will use this version number to update the schema.
You can update the database table by adding a method with the naming convention
UpdateFromN, where N is the number of the version to update. The following code shows the migration class with a method that updates version by adding a new column. int UpdateFrom1() { SchemaBuilder.AlterTable("MapRecord", table => table .AddColumn("Description", DbType.String) ); return 2; } } }
The update method returns 2, because after the column is added, the version number is 2. If you have to add another update method, that method would be called
UpdateFrom2().
After you add the update method and run the project the module will be silently & automatically upgraded.
Content Handlers
A content handler is similar to a filter in ASP.NET MVC. In the handler, you define actions for specific events. In a simple content handler, you just define the repository of record objects for the content part, as shown in the following example:
namespace Map.Handlers
{
    // Minimal content handler: registers the repository-backed storage
    // filter so MapRecord rows are loaded and saved along with the content
    // item.
    public class MapHandler : ContentHandler
    {
        public MapHandler(IRepository<MapRecord> repository)
        {
            Filters.Add(StorageFilter.For(repository));
        }
    }
}
In more advanced content handlers, you define actions that are performed when an event occurs, such as when the feature is published or activated. For more information about content handlers, see Understanding Content Handlers.
Content Drivers
A content driver is similar to a controller in ASP.NET MVC. It contains code that is specific to a content part type and is usually involved in creating data shapes for different conditions, such as display or edit modes. Typically, you override the Display and Editor methods to return the ContentShapeResult object for your scenario.
For an example of using a content driver, see Accessing and Rendering Shapes. | http://docs.orchardproject.net/Documentation/Understanding-data-access | CC-MAIN-2015-14 | en | refinedweb |
Hello,
after starting over with implemented HDR for my new engine, I'm experiencing an issue I had the last time too: The scene's adaptation reacts mostly entirely to the luminance in the center of the scene, not the averaged value. Take a look at the two screenshots. The first one shows a fully adapted scene. The second one is almost exactly the same, though slightly moved to the right. Now obviously the shadowy part of that object is in the middle, and that seems to drag the "average" luminance I calculated to become extremely low, therefore the scene flashes in all white. If I move the camera slightly to either the right, left, up or down, it becomes "normal" again. This happens everywhere, whenever there is a very dark/bright part in the middle of the screen, the adaptation goes nuts. Therefore I deduce that the average luminance calculation is broken. I've even got multiple different settings, and all produce this same behaviour:
- DirectX11, shader:
#include "../../../Base3D/Effects/Vertex.afx" sampler InputSampler : register(s0); Texture2D <float4> Luminance : register(t0); float4 mainPS(VS_OUTPUT i) : SV_TARGET0 { float4 inTex = Luminance.Sample(InputSampler, i.vTex0); return inTex; };
I'm using this shader with a linear sampler to downsample the scene's luminance by 2x at a time until there is only a 1x1 target left.
- DirectX11 auto mipmap generation:
Since the downsampling first produced the issue, I decided to try out auto-mipmap generation. It still produces exactly the same effect, the luminance in the middle of the scene almost entirely determines the avg luminance.
#include "../../../Base3D/Effects/Vertex.afx"

cbuffer instance : register(b2)
{
    float2 params; // x = delta, y = miplevel
}

sampler InputSampler : register(s0);
// Current frame's scene luminance; the mip level given by params.y is
// sampled below (the auto-generated mip chain's smallest level).
Texture2D <float> CurrentLum : register(t0);
// Adapted luminance carried over from the previous frame.
Texture2D <float> PreviousLum : register(t1);

float4 mainPS(VS_OUTPUT i) : SV_TARGET0
{
    float fAdaptedLum = PreviousLum.Sample(InputSampler, i.vTex0);
    // Explicitly select the requested mip level of the current luminance.
    float fCurrentLum = CurrentLum.SampleLevel(InputSampler, i.vTex0, (int)params.y);
    // Adaptation rate constant; larger values converge faster.
    const float fTau = 0.5f;
    // Exponential approach toward the current luminance, scaled by the
    // frame delta time passed in params.x.
    float fNewAdaptation = fAdaptedLum + (fCurrentLum - fAdaptedLum) * (1 - exp(-params.x * fTau));
    return float4(fNewAdaptation, 0.0f, 0.0, 1.0);
};
- DirectX9, shader (almost identically to the DX11 one), also the same result.
Now, is there anything I'm missing? The way I used to do this has been taken from an old NVIDIA sample, but this one doesn't even get gamma-correction right, so I doubt it is accurate... | http://www.gamedev.net/topic/649824-hdr-adaptation-avg-lum-isnt-calculated-properly/ | CC-MAIN-2015-14 | en | refinedweb |
This chapter covers the most basic steps taken in any JDBC application. It also describes additional basic features of Java and JDBC supported by the Oracle JDBC drivers.
The following topics are discussed below. Begin by including the following import statements at the beginning of your program (import java.math only if needed):
Import the following Oracle packages when you want to access the extended functionality provided by the Oracle drivers. However, they are not required for the example presented in this section:
For an overview of the Oracle extensions to the JDBC standard, see Chapter 6, "Overview of Oracle Extensions".
You must provide the code to register your installed driver with your program. You do this with the static
registerDriver() method of the JDBC
DriverManager class. This class provides a basic service for managing a set of JDBC drivers.
Because you are using one of Oracle's JDBC drivers, you declare a specific driver name string to
registerDriver(). You register the driver only once in your Java application.
DriverManager.registerDriver (new oracle.jdbc.OracleDriver());
Open a connection to the database with the static
getConnection() method of the JDBC
DriverManager class. This method returns an object of the JDBC
Connection class that needs as input a user name, password, connect string that identifies the JDBC driver to use, and the name of the database to which you want to connect.
Connecting to a database is a step where you must enter Oracle JDBC driver-specific information in the
getConnection() method. If you are not familiar with this method, continue reading the "Understanding the Forms of getConnection()" section below.
If you are already familiar with the
getConnection() method, you can skip ahead to either of these sections, depending on the driver you installed:
The
DriverManager class
getConnection() method whose signatures and functionality are described in the following sections:
If you want to specify a database name in the connection, it must be in one of the following formats:
TNSNAMESentry (OCI driver only)
For information on how to specify a keyword-value pair or a
TNSNAMES entry, see your Oracle Net Services Administrator's Guide.
The following signature takes the URL, user name, and password as separate parameters:
getConnection(String URL, String user, String password);
Where the URL is of the form:
jdbc:oracle:<
drivertype>:@<
database>
The following example connects user
scott with password
tiger to a database with
INSTANCE_NAME
orcl through port 1521 of host
myhost, using the Thin driver.
Connection conn = DriverManager.getConnection ("jdbc:oracle:thin:@myhost:1521:orcl", "scott", "tiger");
If you want to use the default connection for an OCI driver, specify either:
Connection conn = DriverManager.getConnection ("jdbc:oracle:oci:scott/tiger@");
or:
Connection conn = DriverManager.getConnection ("jdbc:oracle:oci:@", "scott", "tiger");
For all JDBC drivers, you can also specify the database with a Oracle Net keyword-value pair. The Oracle Net keyword-value pair substitutes for the
TNSNAMES entry. The following example uses the same parameters as the preceding example, but in the keyword-value format:
Connection conn = DriverManager.getConnection (jdbc:oracle:oci:@MyHostString","scott","tiger");
or:
Connection conn = DriverManager.getConnection ("jdbc:oracle:oci:@(description=(address=(host= myhost) (protocol=tcp)(port=1521))(connect_data=(INSTANCE_NAME=orcl)))", "scott", "tiger");
The following signature takes the URL, user name, and password all as part of a URL parameter:
getConnection(String
URL);
Where the URL is of the form:
jdbc:oracle:<
drivertype>:<
user>/<password>@<database>

The following example uses a Properties object to supply the user, password, and row-prefetch values:

java.util.Properties info = new java.util.Properties();
info.put ("user", "scott");
info.put ("password", "tiger");
info.put ("defaultRowPrefetch","15");
getConnection ("jdbc:oracle:oci:@",info);
Table 3-1 lists the connection properties that Oracle JDBC drivers support.
See Table 18-4, "OCI Driver Client Parameters for Encryption and Integrity" and Table 18-5, "Thin Driver Client Parameters for Encryption and Integrity" for descriptions of encryption and integrity drivers.
To specify the role (mode) for
sys logon, use the
internal_logon connection property. (See Table 3-1, "Connection Properties Recognized by Oracle JDBC Drivers", for a complete description of this connection property.) To logon as
sys, set the
internal_logon connection property to
sysdba or
sysoper.
The following example illustrates how to use the
internal_logon and
sysdba arguments to specify
sys logon.
//import packages and register the driver import java.sql.*; import java.math.*; DriverManager.registerDriver (new oracle.jdbc.OracleDriver()); //specify the properties object java.util.Properties info = new java.util.Properties(); info.put ("user", "sys"); info.put ("password", "change_on_install"); info.put ("internal_logon","sysdba"); //specify the connection object Connection conn = DriverManager.getConnection ("jdbc:oracle:thin:@database",info); ...
Some of these properties are for use with Oracle performance extensions. Setting these properties is equivalent to using corresponding methods on the
OracleConnection object, as follows:
defaultRowPrefetchproperty is equivalent to calling
setDefaultRowPrefetch().
See "Oracle Row Prefetching".
remarksReportingproperty is equivalent to calling
setRemarksReporting().
See "DatabaseMetaData TABLE_REMARKS Reporting".
defaultBatchValue property. The following example sets both properties:

//import packages and register the driver
import java.sql.*;
DriverManager.registerDriver (new oracle.jdbc.OracleDriver());
//specify the properties object
java.util.Properties info = new java.util.Properties();
info.put ("user", "scott");
info.put ("password", "tiger");
info.put ("defaultRowPrefetch","20");
info.put ("defaultBatchValue", "5");
//specify the connection object
Connection conn = DriverManager.getConnection ("jdbc:oracle:thin:@database",info);
...
For the JDBC OCI driver, you can specify the database by a
TNSNAMES entry. You can find the available
TNSNAMES entries listed in the file
tnsnames.ora on the client computer from which you are connecting. On Windows NT, this file is located in the
[ORACLE_HOME]\NETWORK\ADMIN directory. On UNIX systems, you can find it in the
/var/opt/oracle directory.
For example, if you want to connect to the database on host
myhost as user
scott with password
tiger that has a
TNSNAMES entry of
MyHostString, enter:
Connection conn = DriverManager.getConnection ("jdbc:oracle:oci:@MyHostString", "scott", "tiger");
Note that both the "
:" and "
@" characters are necessary.
For the JDBC OCI and Thin drivers, you can also specify the database with a Oracle Net keyword-value pair. This is less readable than a
TNSNAMES entry but does not depend on the accuracy of the
TNSNAMES.ORA file. The Oracle Net keyword-value pair also works:@(description=(address=(host= myhost) (protocol=tcp)(port=1521))(connect_data=(INSTANCE_NAME=orcl)))", "scott", "tiger");
Because you can use the JDBC Thin driver in applets that do not depend on an Oracle client installation, you cannot use a
TNSNAMES entry to identify the database to which you want to connect. You have to either:
or:
For example, use this string if you want to connect to the database on host
myhost that has a TCP/IP listener on port 1521 for the database
SID (system identifier)
orcl. You can logon as user
scott, with password
tiger:
Connection conn = DriverManager.getConnection ("jdbc:oracle:thin:@myhost:1521:orcl", "scott", "tiger");
You can also specify the database with a Oracle Net keyword-value pair. This is less readable than the first version, but also works with the other JDBC drivers.
Connection conn = DriverManager.getConnection ("jdbc:oracle:thin:@(description=(address=(host=myhost) (protocol=tcp)(port=1521))(connect_data=(INSTANCE_NAME=orcl)))", "scott", "tiger");

Closing a result set or statement releases the corresponding cursor in the database.
setXXX() methods on the
PreparedStatement object to bind data into the prepared statement to be sent to the database. The various
setXXX() methods are described in "Standard setObject() and Oracle setOracleObject() Methods".
You must close your connection to the database once you finish your work. Use the
close() method of the
Connection object to do this:
conn.close();
The steps in the preceding sections are illustrated in the following example, which registers an Oracle JDBC Thin driver,
Connection statement with the following:
Connection conn = DriverManager.getConnection ("jdbc:oracle:oci:@MyHostString", "scott", "tiger");
Where
MyHostString is an entry in the
TNSNAMES.ORA file.
The.
This.
UTF-16encoding. This corresponds to the
getUnicodeStream()method.
The methods
getBinaryStream(),
getAsciiStream(), and
getUnicodeStream() return the bytes of data in an
InputStream object. These methods are described in greater detail in Chapter 8, and Globalization Support". Globalization Support". both SQL92 escape syntax and Oracle PL/SQL block;
Your invocation call in your JDBC program should look like:
Connection conn = DriverManager.getConnection ("jdbc:oracle:oci:@<hoststring>", "scott", "tiger");). See the Oracle9i Java Stored Procedures Developer's Guide for more information on writing, publishing, and using Java stored procedures. B, "JDBC Error Messages".
Errors originating in the RDBMS are documented in the Oracle9i Database Error Messages reference.
printStackTrace() | http://docs.oracle.com/cd/B10501_01/java.920/a96654/basic.htm | CC-MAIN-2015-14 | en | refinedweb |
wmemcmp - compare wide-characters in memory
#include <wchar.h> int wmemcmp(const wchar_t *ws1, const wchar_t *ws2, size_t n);
The wmemcmp() function compares the first n wide-characters of the object pointed to by ws1 to the first n wide-characters of the object pointed to by ws2. If n is zero, the function behaves as if the two objects compare equal.
The wmemcmp() function returns an integer greater than, equal to, or less than zero, accordingly as the object pointed to by ws1 is greater than, equal to, or less than the object pointed to by ws2.
No errors are defined.
None.
None.
None.
<wchar.h>, wmemchr(), wmemcpy(), wmemmove(), wmemset().
Derived from the ISO/IEC 9899:1990/Amendment 1:1995 (E). | http://pubs.opengroup.org/onlinepubs/7990989775/xsh/wmemcmp.html | CC-MAIN-2015-14 | en | refinedweb |
Diversified Consumer Services
Company Overview of 5Point Professional Services
Company Overview
5Point Professional Services, an IT training company, owns and operates a network of learning centers that provide computer and business skills training courses to corporations, individuals, and government agencies in the United States and internationally. It offers public classroom-based and on-site training courses in the areas of desktop applications, technical products, Web and graphics design, Web development, process management, business skills development, and professional development. The company is based in Austin, Texas.
300 East Highland Mall Boulevard
Suite 100
Austin, TX 78752
United States
Phone:
512-349-9555
512-349-2047
Key Executives for 5Point Professional Services
5Point Professional Services does not have any Key Executives recorded.
Similar Private Companies By Industry
Recent Private Companies Transactions
Most Searched Private Companies
Sponsored Financial Commentaries
Sponsored Links
To contact 5Point Professional. | http://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapid=111590797 | CC-MAIN-2015-14 | en | refinedweb |
Davinci
A package to convert any widget to an image which can be saved locally or can be shared to other apps and chats.
📹 Preview
ℹ️ Usage
Prerequisites
on iOS — add the `NSPhotoLibraryAddUsageDescription` key to `Info.plist` to allow saving images to the photo library (verify the exact key against the package README).
on Android
android.permission.WRITE_EXTERNAL_STORAGE- Permission for usage of external storage
Imports
import 'package:davinci/core/davinci_capture.dart'; import 'package:davinci/davinci.dart';
Pseudo code
By default the generated image name will be "davinci". But if you wish to change it, you can pass the image name in DavinciCapture.click method.
await DavinciCapture.click(imageKey, fileName: "Hello");
When the image is captured, you can either open the image preview or get the image in Uint8List.
await DavinciCapture.click(imageKey, fileName: "Hello", openFilePreview: false, returnImageUint8List: true);
If the captured image is pixelated, calculate the pixel ratio dynamically based on the device and pass it to the DavinciCapture.click method.
double pixelRatio = MediaQuery.of(context).devicePixelRatio; await DavinciCapture.click(imageKey, fileName: "Hello", pixelRatio: pixelRatio);
To save the image directly to the device, set
saveToDevice flag to
true. You can also specify the album name or you can leave it undefined.
await DavinciCapture.click(imageKey, fileName: "Hello", saveToDevice: true, album: "Davinci", openFilePreview: false);
ℹ️ All the parameters in the
click method is present in
offStage method too.
🛎️ Note :
- Cannot capture Platformview due to issue
- If you wish to save the generated images locally, do remember to add necessary permissions in
info.plistand
AndroidManifest.xml. | https://pub.dev/documentation/davinci/latest/ | CC-MAIN-2021-39 | en | refinedweb |
Warning: You are browsing the documentation for Symfony 4.2, which is no longer maintained.
Read the updated version of this page for Symfony 5.3 (the current stable version).
Read the How to Use the Messenger article to learn how to use this feature in Symfony applications.
Example:
use App\Message\MyMessage; use Symfony\Component\Messenger\Handler\HandlersLocator; use Symfony\Component\Messenger\MessageBus; use Symfony\Component\Messenger\Middleware\HandleMessageMiddleware; $bus = new MessageBus([ new HandleMessageMiddleware(new HandlersLocator([ MyMessage::class => ['dummy' => ' => ['my_serialization_groups'], ])) );
At the moment, the Symfony Messenger has the following built-in envelope stamps:
Symfony\Component\Messenger\Stamp\SerializerStamp, to configure the serialization groups used by the transport.
Symfony\Component\Messenger\Stamp\ValidationStamp, to configure the validation groups used when the validation middleware is enabled.\HandledStamp, a stamp that marks the message as handled by a specific handler. Allows accessing the handler returned value, the handler callable name and its alias if available from the
Symfony\Component\Messenger\Handler\HandlersLocator.
Instead of dealing directly with the messages in the middleware you receive the envelope. Hence you can inspect the envelope content and its stamps, or add any:
use App\Message\Stamp\AnotherStamp;
Symfony\Component\Messenger\Transport\Sender\SenderInterface,
you can create your own message sender:
namespace App\MessageSender; use App\Message\ImportantAction; use Symfony\Component\Messenger\Envelope; use Symfony\Component\Messenger\Transport\Sender\SenderInterface; class ImportantActionToEmailSender implements SenderInterface { private $mailer; private $toEmail; public function __construct(\Swift_Mailer \Swift_Message('Important action made')) ->setTo($this->toEmail) ->setBody( '<h1>Important action</h1><p>Made by '.$message->getUsername().'</p>', 'text/html' ) ); } }. | https://symfony.com/index.php/doc/4.2/components/messenger.html | CC-MAIN-2021-39 | en | refinedweb |
.
What is
-betterC, and Why do I Need it?
The short answer is that most D programmers don’t need it. The longer answer is that it does two things: first, it restricts the language to a lower-level subset (that’s still higher-level than C), and, second, it changes the implementation of compiled code a little so that it only depends on the C runtime, and not the D runtime.
If you just want control over things like GC and runtime features for performance reasons, you can already get it
without
-betterC. You can read more about that in this GC series on the official blog, and in my previous post about the D runtime itself.
What
-betterC does provide is an intermediate
language that integrates very well with both C code and D code. Walter envisions this as a way for D to penetrate more
into parts of the software world that are still dominated by C. For example, practically all languages today still run
on top of a layer of operating system libraries that are written in C (and C++ in the Windows world). D’s runtime
itself depends on this layer, so D can’t ever replace C unless runtimeless programming is possible.
The
-betterC switch is a little controversial, and I agree
that things could be better in the future. But
-betterC is
here today, and ultimately we’re only going to figure out how to use D as a better C by trying it out. That’s why I
originally published that post about runtimeless D, even though it was a horrible hack.
What’s New?
There are two main ways betterC programming has improved since I last wrote about it. One pain point was the
over-dependence on runtime reflection in the language implementation, even for things like integer array comparisons
that could be implemented with just
memcmp(). A lot of work
has been done since then to replace reflection with templates, which is good news even for programmers who aren’t doing
low-level stuff. Lucia Cojocaru presented some of this work at
DConf 2017.
The other area of improvement is in the
-betterC switch
itself. Back then there were only two places in the DMD compiler where
-betterC had any effect at all, so most runtime dependencies were left
in. Simply defining a struct, for example, would still cause the D compiler to insert
TypeInfo instances for runtime type information, which depend on base
class implementations that are defined in the D runtime library.
assert statements would still be implemented using a D runtime
implementation, not the C runtime implementation. These are the two most obvious problems that have been fixed.
-betterC Take Two
In that old post, I took some D code, compiled it, hacked out the runtime, and then linked it directly to some C code without the D runtime. Let’s see how things work now. Here’s the D code again:
module count;

@nogc:
nothrow:

import core.atomic : atomicOp, atomicLoad;

extern(C)
{
    // Exposed with C linkage so the C program can call it directly.
    // Returns the current value, then bumps the counter on scope exit.
    int count()
    {
        scope(exit) counter.addOne();
        return counter.getValue();
    }
}

private:

// Minimal thread-safe counter built on core.atomic primitives.
shared struct AtomicCounter(T)
{
    void addOne() pure
    {
        atomicOp!"+="(_v, 1);
    }

    int getValue() const pure
    {
        return atomicLoad(_v);
    }

    private:
        T _v;
}

unittest
{
    shared test_counter = AtomicCounter!int(42);
    assert (test_counter.getValue() == 42);
    test_counter.addOne();
    assert (test_counter.getValue() == 43);
}

// Module-level counter; starts at 1 so the first call to count() returns 1.
shared counter = AtomicCounter!int(1);
And here’s the C code:
#include <stdio.h> int count(); // From the D code int main() { int j; for (j = 0; j < 10; j++) { printf("%d\n", count()); } return 0; }
Here’s what happens now (on a GNU/Linux system):
$ dmd --version DMD64 D Compiler v2.076.0-b2-dirty Copyright (c) 1999-2017 by Digital Mars written by Walter Bright $ ls count.d program.c $ dmd -c -betterC count.d $ gcc count.o program.c -o program count.o:(.data.DW.ref.__dmd_personality_v0+0x0): undefined reference to `__dmd_personality_v0' collect2: error: ld returned 1 exit status
Damn. So close. The D compiler has left in some exception handling data structures, even though
-betterC isn’t supposed to support exceptions. You’ll see I’m using the
new DMD beta, and there’s already an open bug report and pull
request for this kind of problem, so I expect it’ll be fixed soon.
I’ll update this post when it
is.
Here’s a quick workaround for now (read the original linker hacking article for an explanation):
$ objcopy -R .data.DW.ref.__dmd_personality_v0 -R .eh_frame count.o $ gcc count.o program.c -o program $ ./program 1 2 3 4 5 6 7 8 9 10
It might not look like much, but it’s a huge improvement. Thanks to all the developers who helped make it happen. | https://theartofmachinery.com/2017/08/26/new_betterc.html | CC-MAIN-2021-39 | en | refinedweb |
( developed for SP2013 on-prem, Nintex forms 2.10 )
This code was inspired by nmarples whose amazing blog post can be found here. He lifts the curtain on the Nintex rules engine and shows the power of combining Javascript with rules, providing clues to this realtime validation concept.
You can implement this solution right away on any form control without any additional configuration, cut and paste as shown below using these 4 simple steps;
1) make sure each form field control is named and the label is associated with the control:
2) paste this CSS in the form's Custom CSS setting:
3) paste this Javascript in the form's Custom JavaScript setting:
lpForm = {
    // Validates one Nintex form control and recolours its bordered wrapper.
    //   formControlCall - runtime control reference string, e.g. "...'<guid>'..."
    //   sourceContext   - jQuery context for the rendered form DOM
    //   required        - true when the field is mandatory
    //   isValid         - result of the rule expression ({Control:Self} == ''),
    //                     i.e. true when the field is currently empty
    Validate: function( formControlCall, sourceContext, required, isValid ) {
        // Pull the control GUID out of the runtime reference string.
        var controlGuid = formControlCall.split( "'" )[1] || "";
        // Find the rendered input element carrying that GUID.
        var inputEl = sourceContext.find( "[name^='ctl'][formcontrolid='" + controlGuid + "']" );
        // People controls render as a bare placeholder on initial load; bail out
        // until the real element exists — validation fires again later.
        if ( inputEl.length == 0 ) return;
        // Convert the "$"-delimited element name into the "_"-delimited id
        // referenced by the label's for-attribute.
        var labelFor = inputEl.attr( 'name' ).split( '$' ).join( '_' );
        // Walk up from the associated label to the bordered wrapper we recolour.
        var border = NWF$( "[for^='" + labelFor + "']" )
            .closest( '.nf-filler-control-border' )
            .parent();
        if ( isValid ) {
            // Field is empty: red when required, yellow when optional.
            if ( required ) {
                border.addClass( 'lp-border-red' ).removeClass( 'lp-border-yell' ).removeClass( 'lp-border-grn' );
            } else {
                border.addClass( 'lp-border-yell' ).removeClass( 'lp-border-red' ).removeClass( 'lp-border-grn' );
            }
        } else {
            // Field has a value: green.
            border.addClass( 'lp-border-grn' ).removeClass( 'lp-border-yell' ).removeClass( 'lp-border-red' );
        }
        return isValid;
    },
};
4) paste this expression into a new rule for each form field you wish to validate
optional : inside the expression, change true to false to change the field from required to optional
note : in the Condition window {Control:Self} will format itself to appear as {Self} once saved
Done - Enjoy!
The above code is a prototype created in an afternoon. There are probably some edge cases that may require additional coding, but this appears to be suitable for all basic form controls.
The 3 important concepts from nmarples that he shares in his blog post are:
"{Control:Self}"and pass the resulting expression as a boolean parameter
{Control:Self} == ''Nintex Runtime Functions this expression is the same as isNullOrEmpty({Control:Self})
3 classes are used to set the associated control back-grounds to red, yellow or green.
lpForm : creates a unique namespace where I can safely use any name for my function.
Validate : takes 4 parameters.
formControlCall : a reference to the currently active control
sourceContext : a reference to the context of the DOM
required : true or false ( whether this form field is required or optional )
isValid : true or false ( the result of the provided validation expression - already evaluated )
ctl00$PlaceHolderMain$formFiller$FormView$ctl26$ctl16$ac5d43fc_51e7_4d06_b3e8_150731c4bac9
The immediately-invoked function expression gathers the following run-time provided parameters:
a reference to the current control : "{Control:Self}"
and a the validation expression : {Control:Self} == ''
So the IIFE evaluates these two parameters and passes results into the interior function, were all four parameters get passed to the base Validation function. Then the base function Validation ultimately returns back the isValid boolean, which could be consumed by the Rule too.
This is awesome! Thank you so much for sharing and great work!
This is wonderful. Will definitely be using this.
Great to see how all the ideas and knowledge sharing spreads across the community
That's pretty cool!
Minor refactor ...apparently, in edit mode, a populated people control only has an empty placeholder. So when the first wave of validation hits this control fails to validate and pops an error in an alert-box. ( the control must be waiting to finish a call to display person deets ). But once populated, validation is triggers again and all if well. So I needed to add an escape hatch, return from function when DOM query length is 0. | https://community.nintex.com/t5/Community-blogs/Creating-Real-Time-Validation-on-Nintex-Forms/ba-p/80073 | CC-MAIN-2021-39 | en | refinedweb |
I have never used Gatsby, but I found myself reading about this issue on hydration bugs on production sites. Long story short: due to Gatsby preferring fast development you will not get hydration warnings of server side generated content by React during development. Thus you miss hydration issues which result into broken layout.
Instead of focusing on Gatsby I'm going to bring up the whole generic universal JavaScript issue when working with SSR and SPA, and suggest a solution to the hydration problem. Wouldn't be a programmer if I didn't!
Core of the issue
When doing this sort of universal rendering in React you have three modes:
- Server static HTML generation / "Server mode"
- Client hydration of DOM (based on the parsed static HTML) / "Hydrate mode"
- Client side renders / "SPA mode"
When in SPA mode you can render whatever you like: any logic or condition you use is free to use to figure out what to render. This is where you're in when doing development on Gatsby, or "hot reloading" as it was used to be called when first introduced to the React world.
A more challenging part is understanding React processed on server, and React hydration on existing DOM. People may come up with stuff like this:
function MyComponent({ isLoggedIn }) {
  // Problematic pattern: this branch only runs during the server render,
  // so the hydration render on the client produces different markup than
  // the server HTML — the mismatch described in the surrounding text.
  if (typeof window === 'undefined') {
    return null
  }
  if (isLoggedIn) {
    return 'Hello world!'
  }
  return 'Please login!'
}
The idea of this code is to target situation when you're doing a server mode render. A problem arises during hydrate mode, because you won't end up rendering
null there and instead jump into client side rendering that is aware of client only state. This results into a mismatch with server render and client render, and during development React will warn you about this upon refreshing the page (
Warning: Expected server HTML to contain a matching <x> in <y>).
The way to work around this issue is to understand you have to treat server mode and hydrate mode as equals. We can call it initial render to make it a bit easier as a mental model, because regardless of running React on server or client it is the first render. It is also typically the render where there is no user specific state. We do this to optimize server performance by caching the HTML.
A bad thing about React is that they do not provide a tool to know if we are done with the initial render or not! There is no function that you could call. This has then resulted into people working around the issue on their own, and not always ending up with the most bulletproof results.
A naive solution
If you're having difficulties getting a hang of the above please read on how to avoid rehydration problems. It is a longer take on the problem but gives another kind of view on this issue which may work better for some people.
The suggested solution in the article is the following:
function ClientOnly({ children, ...delegated }) {
  // False on the server and during the hydration render.
  const [hasMounted, setHasMounted] = React.useState(false)

  // Flips to true after the component's first client-side render.
  React.useEffect(() => {
    setHasMounted(true)
  }, [])

  // NOTE: this state is per component instance, so every mount — even long
  // after hydration, in plain SPA mode — still renders twice.
  if (!hasMounted) {
    return null
  }

  return (
    <div {...delegated}>
      {children}
    </div>
  )
}
The problem here is that this is component specific! This means
hasMounted will always be
false during the first render of a component even when we are already in SPA mode and we are no longer doing initial render of the App. This means you will have two renders in SPA mode even though you'd need only one. This of course isn't most often a big issue, but these things can become one and ideally you should always go for the least work done possible.
There are also solutions that avoid
useState, such as this useIsMounted implementation, but that approach has a different problem: once the initial render has finished, a newly mounted component never triggers a second render, so the UI would not reflect SPA state. You would keep showing the static HTML until something else forces a re-render. And the main use of
isMounted is to know whether you can still be finishing with asynchronous operations or not, so the earlier code above calling it
hasMounted is not really matching what
isMounted is really meant for.
A solution without double renders on SPA mode
Years ago in a codebase I worked with I ended up with a simple Redux solution to this issue: I added a
serverMode boolean to the root of the store and then changed it in the main
App component:
// NOTE: default value for serverMode = true class App extends React.Component { componentDidMount() { this.props.dispatch(setServerMode(false)) } } // or as hooks: function App() { const dispatch = useDispatch() useEffect(() => { dispatch(setServerMode(false)) }, []) }
Which then enabled to check for the condition:
function MyComponent() {
  // true during the server render and the matching hydration render.
  const serverMode = useSelector(state => state.serverMode)

  if (serverMode) {
    return null
  }

  return 'Now in SPA mode'
}
The advantage here is that now once we have hydrated we will always be in SPA mode and thus will not have the issue that we would have using
useIsMounted with double renders. And we get to detect the initial render.
The downside is that we now have a dependency on Redux with essentially something that is just a single boolean that will never be changed again, but the component is still registered to listen to the Redux store changes even if it otherwise wouldn't need to be connected to Redux. This can be harmful to performance.
Getting hooked and solving all the issues
As we're using hooks with modern React it would make sense to make something similar to
useIsMounted, but make it global to the App and name it more appropriately for the use. For this we can make use of
useState with a single call
useEffect and add an abstraction that uses the Context API so that we can create a solution similar to what I did with Redux and
serverMode previously.
const HydrateContext = createContext('hydrated') export function useIsHydrated() { return useContext(HydrateContext) } export function IsHydratedProvider({ children }) { const [isHydrated, setIsHydrated] = useState(false) useEffect(() => { setIsHydrated(true) }, []) return ( <HydrateContext.Provider value={isHydrated}> {children} </HydrateContext.Provider> ) }
We do not need to ever restore it back to
false as this value is intended to be
true after hydration for the lifetime of the App, and once the App is gone we don't need the value for any other use. We only need it to "force" the second render after hydrating the static HTML. In comparison
isMounted is appropriate for the cases where asynchronous operations might still be flying.
As for using the above code:
function MyComponent() { const isHydrated = useIsHydrated() return !isHydrated ? 'Initial render' : 'SPA mode' } function App() { return ( <IsHydratedProvider> <MyComponent /> </IsHydratedProvider> ) }
Now we no longer depend on Redux and there will ever be only one time after initial render on client side that the SPA mode is toggled on where we can fill stuff based on state we only have access to on the client, such as greetings with the user's name.
Now I only hope I didn't screw up with the above as I was lazy here to boot up a new SSR + SPA React app to make sure the code works! I blame the summer heat for the laziness :)
The remaining open topic is actually naming related: is
isHydrated clear enough, or would reversing the boolean value and call it
isInitialRender be better? I guess that might be perfectly up to the reader to decide :)
Discussion (1)
Thank you for the article. Seems to me, the same DOM elements switch to be controlled by server first and then the client. Is that right? | https://practicaldev-herokuapp-com.global.ssl.fastly.net/merri/understanding-react-ssr-spa-hydration-1hcf | CC-MAIN-2021-39 | en | refinedweb |
In previous articles we used requests and BeautifulSoup to scrape the data. Scraping data this way is slow (Using selenium is even slower).
Sometimes we need data quickly. But if we try to speed up the process of scraping data using multi-threading or any other technique, we will start getting HTTP status 429, i.e. too many requests. We might get banned from the site as well.
Purpose of this article is to scrape lots of data quickly without getting banned and we will do this by using docker cluster of celery and RabbitMQ along with Tor.
For this to achieve we will follow below steps:
Let's start.
Dockerfile used to build the worker image is using python:3 docker image.
Directory structure of code:
. --- celery_main | --- celery.py | --- __init__.py | --- task_receiver.py | --- task_submitter.py --- docker-compose.yml --- dockerfile --- README.md --- requirements.txt 1 directory, 8 files
Run the below command to start the docker cluster:
sudo docker-compose up
This will run one container for each worker and RabbitMQ. Once you see something like
worker_1 | [2018-03-01 10:46:30,013: INFO/MainProcess] celery@5af881b83b97 ready.
Now you can submit the tasks. But before going any further lets try to understand the code while it is simple and small.
from celery import Celery app = Celery( 'celery_main', broker='amqp://myuser:mypassword@rabbit:5672', backend='rpc://', include=['celery_main.task_receiver'] )).
The third argument is backend. A backend in Celery is used for storing the task results.
task_submitter.py:
from .task_receiver import do_work if __name__ == '__main__': for i in range(10): result = do_work.delay(i) print('task submitted' + str(i))
This code will submit the tasks to workers. We need to call
do_work method with delay so that it can be executed in async manner.
Flow returns immediately without waiting for result. If you try to print the result without waiting, it will print
None.
task_receiver.py:
from celery_main.celery import app import time import random @app.task(bind=True,default_retry_delay=10) def do_work(self, item): print('Task received ' + str(item)) # sleep for random seconds to simulate a really long task time.sleep(random.randint(1, 3)) result = item + item return result
We can easily create a task from any callable by using the
task() decorator. This is what we are doing here.
bind=True means the first argument to the task will always be the task instance (self). Bound tasks are needed for retries, for accessing information about the current task request.
sudo docker-compose up.
We will not be running containers in detached mode (-d ) as we need to see the output. By default it will create one worker.
In another terminal go inside the worker container using command
sudo docker exec -it [container-name] bash. It will start the bash session in working directory defined by
WORKDIR in dockerfile.
Run the task submitter by using command
python -m celery_main.task_submitter. Task submitter will submit the tasks to workers and exit without waiting for results.
You can see the output (info, debug and warnings) in previous terminal. Find out how much seconds cluster took to complete 10 tasks.
Now stop all containers, remove them and restart them. But this time keep the worker count to 10. Use command
sudo docker-compose up --scale worker=10.
Repeat the process and find the time taken to complete the tasks. Repeat above step by changing the worker count and concurrency value in dockerfile to find the best value for your machine where it took least time.
Increasing concurrency value beyond a limit will no longer improve the performance as workers will keep switching the context instead of doing actual job. Similarly increasing the worker count beyond a limit will make your machine go unresponsive. Keep a tab on CPU and memory consumed by running top command in another terminal.
All the twitter handles are in
handles.txt file placed in root directory of code.
Update the
task_submitter.py file to read the handles and submit them to the task receiver.
Task Receiver will get the response from twitter and parse the response to extract the tweets available on first page. For simplicity we are not going to the second page.
Code to extract the tweets is as below:
@app.task(bind=True,default_retry_delay=10) def do_work(self, handle): print('handle received ' + handle) url = "https://twitter.com/" + handle session = requests.Session() response = session.get(url, timeout=5) print("-- STATUS " + str(response.status_code) + " -- " + url) if response.status_code == 200: parse_tweets(response, handle) def parse_tweets(response, handle): soup = BeautifulSoup(response.text, 'lxml') tweets_list = list() tweets = soup.find_all("li", {"data-item-type": "tweet"}) for tweet in tweets: tweets_list.append(get_tweet_text(tweet)) print(str(len(tweets_list)) + " tweets found.") # save to DB or flat files. def get_tweet_text(tweet): try: tweet_text_box = tweet.find("p", {"class": "TweetTextSize TweetTextSize--normal js-tweet-text tweet-text"}) images_in_tweet_tag = tweet_text_box.find_all("a", {"class": "twitter-timeline-link u-hidden"}) tweet_text = tweet_text_box.text for image_in_tweet_tag in images_in_tweet_tag: tweet_text = tweet_text.replace(image_in_tweet_tag.text, '') return tweet_text except Exception as e: return None
Now if you run this code, it will start receiving HTTP status 429 (too many requests) errors after a few hits. To avoid this we need to use the Tor network to send the requests from different IPs, and we will also use a different user agent in each request.
git clone
- Build the image and use the same name in docker-compose file.
rproxy: hostname: rproxy image: anuragrana/rotating-proxy environment: - tors=25 ports: - "5566:5566" - "4444:4444"
- You may skip above steps as docker image with tag used in docker-compose is already present in docker hub.
- Create a file
proxy.py and write the below code in it.
import requests import user_agents import random def get_session(): session = requests.session() session.proxies = {'http': 'rproxy:5566', 'https': 'rproxy:5566'} session.headers = get_headers() return session def get_headers(): headers = { "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8", "accept-language": "en-GB,en-US;q=0.9,en;q=0.8", "User-Agent": random.choice(user_agents.useragents) } return headers
- Create a new file
user_agents.py. This will contain the list of user agents and we will use one of these, selected randomly, in each request.
If you run the container now, the IP will change after every few requests and the user agent will change on each hit, resulting in almost zero 429 status responses.
#include <rte_compat.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
Go to the source code of this file.
ixgbe PMD specific functions.
Definition in file rte_pmd_ixgbe.h.
Response sent back to ixgbe driver from user app after callback
Definition at line 657 of file rte_pmd_ixgbe.h.
Notify VF when PF link status changes.
Set the VF MAC address.
Enable/Disable VF VLAN anti spoofing.
Enable/Disable VF MAC anti spoofing.
Enable/Disable vf vlan insert
Enable/Disable tx loopback
set all queues drop enable bit
set drop enable bit in the VF split rx control register
Enable/Disable vf vlan strip for all queues in a pool
Enable MACsec offload.
Disable MACsec offload.
Configure Tx SC (Secure Connection).
Configure Rx SC (Secure Connection).
Enable Tx SA (Secure Association).
Enable Rx SA (Secure Association).
Set RX L2 Filtering mode of a VF of an Ethernet device.
Enable or disable a VF traffic receive of an Ethernet device.
Enable or disable a VF traffic transmit of the Ethernet device.
Enable/Disable hardware VF VLAN filtering by an Ethernet device of received VLAN packets tagged with a given VLAN Tag Identifier.
Set the rate limitation for a vf on an Ethernet device.
Set all the TCs' bandwidth weight.
The bw_weight means the percentage occupied by the TC. It can be taken as the relative min bandwidth setting.
Initialize bypass logic. This function needs to be called before executing any other bypass API.
Return bypass state.
Set bypass state
Return bypass state when given event occurs.
Set bypass state when given event occurs.
Set bypass watchdog timeout count.
Get bypass firmware version.
Return bypass watchdog timeout in seconds
Reset bypass watchdog timer
Acquire swfw semaphore lock for MDIO access
Release swfw semaphore lock used for MDIO access
Read PHY register using MDIO without MDIO lock The lock must be taken separately before calling this API
Write data to PHY register using without MDIO lock The lock must be taken separately before calling this API
Get port fdir info
Get port fdir status | https://doc.dpdk.org/api-20.11/rte__pmd__ixgbe_8h.html | CC-MAIN-2021-39 | en | refinedweb |
AnalogSmoother (community library)
Summary
Smooth analog input readings through a running average buffer. Provides a single method that reads the value of an analog input and returns the average of the last N readings.
Example Build Testing
Device OS Version:
This table is generated from an automated build. Success only indicates that the code compiled successfully.
Library Read Me
This content is provided by the library maintainer and has not been validated or approved.
AnalogSmoother
This library employs a running average to smooth analog input readings.
#include "AnalogSmoother.h" AnalogSmoother sensor(A0, 10); // Buffer size of 10 readings void setup() { Serial.begin(115200); // Optional, fill the buffer with actual readings from the sensor sensor.fill(); } void loop() { // Draw 2 lines in the serial plotter Serial.print(analogRead(A0)); Serial.print(" "); Serial.println(sensor.read()); // Slow down the serial output delay(10); }
Browse Library Files | https://docs.particle.io/cards/libraries/a/AnalogSmoother/ | CC-MAIN-2021-39 | en | refinedweb |
TrivialDBTrivialDB
TrivialDB is a lightweight key/value json storage with persistence. Conceptually, it's just a thin lodash wrapper around plain javascript objects; with the added bonus of doing versioned asynchronous writes on changes. Its on disk format is simply "json on disk"; basically the json version of the plain object, saved to a file on disk. This makes making hand edits not just possible, but simple.
Use CaseUse Case
TrivialDB is intended for simple storage needs. It's in-process, small, and very, very fast. It takes almost nothing to get up and running with it, and it gives you an impressive amount of power, thanks to lodash chaining. I've found its a great fit for any personal project that needs to persist data. If you find yourself wanting to work with raw json files, it's a rather large improvement on writing your own loading/saving/querying logic.
The one caveat to keep in mind is this: every database your work with is stored in memory. Since TrivialDB is in-process, you might run into the memory limits of node; before v12, on a 64 bit machine, this is 1.76GB by default. (You can increase this via
--max_old_space_size=<size>.) In practice, however, this isn't actually that much of a limitation. Generally, you're working with a large amount of your data in memory anyway; your data sets can get relatively large before you even need to worry about this.
In fact, the very popular nosql database Redis is in-memory. In their FAQ, they have this to say:
In the past the Redis developers experimented with Virtual Memory and other systems in order to allow larger than RAM datasets, but after all we are very happy if we can do one thing well: data served from memory, disk used for storage. So for now there are no plans to create an on disk backend for Redis. Most of what Redis is, after all, is a direct result of its current design.
In practice, I use TrivialDB to power a wiki that has thousands of printed pages worth of text, and the node process uses around 200mb, with the json being around 1mb on disk. For things like a blog, or user database, or session storage, or a preference system, TrivialDB will work for a long time before you need to move to something out of process.
The one caveat to keep in mind is this: every database you work with is stored in memory. Since TrivialDB is in-process, you might run into the memory limits of node; (on versions before 0.12 there's a 1.4GB - 1.7GB limit). However, this isn't actually that much of a limitation. Generally, you're working with a large amount of your data in memory anyway; your data sets can get relatively large before you even need to worry about this.
In-Browser DatabaseIn-Browser Database
One of the new and exciting use cases is that TrivialDB is now usable inside a browser! By default it will read/write JSON over REST, but you can easily change this to use IndexedDB or LocalStorage. You can even use a bundler like Browserify or Webpack to include the JSON directly, and have zero load time.
This helps when developing static, "server-less" sites; you can have a development version that generates the JSON locally, commit it to git, and then have your static site generation simply include the new JSON files and push them out. Your client-side code can still work with TrivialDB as if it was a normal application.
(For more information, please see the "Reading and Writing in a Browser" section.)
Lodash ShoutoutLodash Shoutout
This entire project is made possible by the lodash project. If it wasn't for their hard work and the effort they put into building an amazing API, TrivialDB would not exist.
InstallationInstallation
Simply install with npm:
$ npm install --save trivialdb
TrivialDB APITrivialDB API
There are two concepts to remember with TrivialDB: namespaces and databases. A 'namespace' is, as it implies, just an isolated environment with a name. Inside a namespace, all database names must be unique. So, if you want to have to independent 'foobar' databases, you will need to have them in different namespaces.
Databases, on the other hand, are the heart and soul of TrivialDB. As the name implies, they hold all your data. Database objects are the interesting ones, with the main API you will be working with in TrivialDB.
Creating a namespaceCreating a namespace
ns(name, options)- creates or retrieves a
TDBNamespaceobject.
- alias: 'namespace'
const trivialdb = require('trivialdb');

// Create a namespace
const ns1 = trivialdb.ns('example');

// Create a namespace with some options
const ns2 = trivialdb.ns('example2', { dbPath: 'db' });

// Create a database inside that namespace
const db = ns1.db('some_db');
Once you've created your namespace object, you can create or retrieve database instances from it, just like you can the main TrivialDB module.
OptionsOptions
The options supported by the
ns call are:
basePath: "..." // The base path for all other paths to be relative to. (Defaults to the application's base directory.)dbPath: "..." // The path, relative to `basePath` to the root database folder. (Defaults to 'db'.)
If you call
ns passing in the name of an existing namespace, any options passed will be ignored.
Creating a databaseCreating a database
db(name, options)- creates or retrieves a database instance.
- alias: 'database'
const trivialdb = require('trivialdb');

// Open or create a database
const db = trivialdb.db('some_db');

// Open or create a database, with options
const db2 = trivialdb.db('some_db2', { writeToDisk: false });
By default, when a new database is created, it will look for a file named
'some_db.json' inside the database folder.
(By default this is
'<application>/db'. You can control this path by setting the
basePath or
dbPath options of the
namespace, or alternatively, the
dbPath or
rootPath options of the database.)
You can request the same database multiple times, and get back the same instance (though any options passed on subsequent calls will be ignored). This allows you to request the database by name in different places in your code, and not worry about the two database instance fighting with each other.
OptionsOptions
The options supported by the
db call are:
writeToDisk: true || false // Whether or not to persist the database to disk. (Default: `true`)loadFromDisk: true || false // Whether or not to read the database in from disk on load. (Default: `true`)rootPath: "..." // The path to a folder that will contain the persisted database json files. (Default: './')dbPath: "..." // The path, relative to the namespace's `basePath` to the root database folder. (Defaults to 'db'.)writeDelay: ... // A number in milliseconds to wait between writes to the disk. (Default: 0)prettyPrint: true || false // Whether or not the json on disk should be pretty printed. (Default: `true`)pk: "..." // The field in the object to use as the primary key. (Default: `undefined`){...} // The function to use to generate unique ids.
If you call
db passing in the name of an existing namespace, any options passed will be ignored.
Namespace APINamespace API
Namespaces have exactly one function,
db, which works exactly like the TrivialDB function for creating a database.
(see above.)
Database APIDatabase API
TrivialDB database objects have two APIs, one synchronous, the other asynchronous (Promise based). The synchronous API is significantly faster, but it does not trigger syncing to disk, and should be considered a 'dirty' form of reading and writing. In the future, TrivialDB may get the ability to support multiple processes sharing the same file, and at that time, the synchronous API will be a truly dirty API, with the values often being out of date. (See the more in depth discussion in each relevant section below.)
PropertiesProperties
The database object has the following properties:
name- The name given to the database. (Also the filename, minus extension.)
count- The number of keys in the database.
path- The full path to the backing file, assuming it writes to disk.
rootPath- The full path to the folder for the database (aka
pathminus the filename).
loading- A promise that is resolved once the initial data is loaded.
Database OptionsDatabase Options
There are some options that deserve further details.
Custom ID GenerationCustom ID Generation
If you want to generate your own ids, and not use the ids TrivialDB generates by default, you can specify your own
function in the database options. By specifying
idFunc, TrivialDB will use this function to generate all ids, when needed.
The
idFunc function is passed the object, so you can generate ids based on the object's content, if you wish. (An
example of this would be generating a slug from an article's name.)
{return articlename;} // end slugify// Declare a new database, using the slugify function above.const db = trivialdb;// Now, we save an objectdb;
Be careful; it is up to you to ensure your generated ids are unique. Additionally, if your generation function blows up, TrivialDB may return some nonsensical errors. (This may improve in the future.)
readFunc and
writeFunc
You can override the built in underlying read and/or write functions. By default these will read/write from the disk (in node) or
GET/
POST to the specified path in the browser. You can, however, override them with any
Promise returning function.
readFunc(path)- This function is passed the absolute path of the file as
path. The
rootDirwill be
/on browser, or the root directory of the running node process. This function must return a
Promise. The promise's return value is ignored.
writeFunc(path, jsonStr)- This function is passed the absolute path of the file as
path, and the json string representation of the database as
jsonStr. The
rootDirwill be
/on browser, or the root directory of the running node process. This function must return a
Promise. The promise's return value is ignored.
As long as
loadFromDisk (or
writeToDisk) is not set to false, TrivialDB will attempt to load a database when you
first call
.db(). Sometimes, you need to wait for the datbase to be loaded before doing operations. This is why we
provide a
loading promise on the db object. Waiting for the database to be done is very simple:
// This could declare a new DB, or it could be pulling an existing one from the cache.const db = trivialdb;// This will execute once the db is loaded. If it is already loaded, we resolve instantly.dbloading;
It's worth mentioning that the
loading promise is always available, even if disk operations will not be performed.
This means you can always wait on the
loading promise, without knowing details about how the database is configured.
loaded event
If you don't want to use the loading promise, there is also a
loaded event that is always fired off once the database
has finished loading. This event does still fire if there's no disk operations to do.
// This could declare a new DB, or it could be pulling an existing one from the cache.const db = trivialdb;// This will execute once the db is loaded. If it is already loaded, **this will never fire.**db;
Note: Due to the nature of events, if the database has already loaded, listening for the
loaded event will never
trigger. There is no way to know, other than to call
db.loading.isPending(), at which point you should probably just
use the promise directly.
Reading and Writing in a BrowserReading and Writing in a Browser
By default, in node, TrivialDB will attempt to read and write using the
fs library. The path will be relative to the
project's absolute path. However, in a browser, we can't use
fs. So, instead, we use the fetch api to make REST
calls. The
path is relative to
/, and will look something like
/db/namespace/some_db.json.
For loading the database, it will make a
GET request, and for writing, it will make a
POST request. If you need to
do something different, like using
PUT or maybe transmitting data over websockets, simply override the
readFunc
and/or
writeFunc options in the configuration.
Key/Value APIKey/Value API
The synchronous API follows a scheme of
get,
set,
del. Primarily, these functions work with the internal memory store
directly, meaning that in the case of
set or
del, thier changes will not be persisted until something else triggers
a write to disk. If you have set
writeToDisk to
false, then you can use these APIs without any concern at all.
The asynchronous API follows a scheme of
load,
save,
remove. These functions are always considered safe; they will
not resolve their promises until after the changes have been successfully saved to disk. (They will, however, modify the
data immediately, so dirty reads/writes may occur while the safe read/write is pending, and it will get the updated
value.)
It should be noted that currently,
get and
load are only differentiated by the fact that
load returns a promise.
In the future,
load may be modified to sync from disk, allowing for multiple processes to write to the same json file.
This is important to keep in mind, as
get is a very popular function, if you are in a multiprocess scenario in the
future, it may return stale values. As such, it should be considered a dirty read.
Retrieving ValuesRetrieving Values
- Synchronous
get(key, defaultVal)- Returns the value stored under
keyor
undefined.
- Asynchronous
load(key)- Returns a promise resolved to the value or throws
DocumentNotFoundError.
// Get an object synchronouslyconst val = db;// Get an object synchronously, with a default valueconst val2 = db;// Get an object asynchronouslydb;// Get an object asynchronously, with a default valuedb;// Get an object asynchronously that doesn't existdb;
TrivialDB only supports direct retrieval by a single string identifier. If a value for that key is not found,
undefined
will be returned for
get (This mirrors the direct use of objects in JavaScript); additionally, we allow you to pass in
a default value, which will be returned if the key is not found. This is not supported on
load, however, as
load
is intended to return you exactly what is in the database at this moment. If you attempt to use
load to get a
document that does not exist, it will throw a
DocumentNotFoundError object. This allows you to do traditional promise
error handling, as opposed to using
if(result === undefiend).
Storing ValuesStoring Values
- Synchronous
set(value)- Returns a generated key.
set(key, value)- Returns
key.
- Asynchronous
save(value)- Returns a promise resolved with a generated key.
save(key, value)- Returns a promise resolved with
key.
// Store a valueconst id = db;// Store a value with a specific keydb;// Overwrite the previous valuedb;// Asynchronously store a valuedb;
All values in TrivialDB are stored under a key. They may be objects, or primitive types. If you do not pass in a key, TrivialDB will generate one for you. (The autogenerated keys are base62 encoded uuids, basically the same algorithm use by url shorteners.) In the event you do not pass a key, your will need to look at the return value to know how to retrieve your objects.
If you specify a key, it is up to you to ensure it's unique. TrivialDB will silently overwrite any previous value.
TrivialDb supports the
pk option for setting a primary key. Keys are always added if your value is an object, but
with the
pk options, you can control what field it is stored under. (By default, it's
id.)
Removing ValuesRemoving Values
- Synchronous
del(predicate)- Returns a list of removed values.
- Asynchronous
remove(predicate)- Returns a promise resolved with a list of removed values.
Removing values works off a lodash predicate, much like filter. This allows for removing multiple documents at the
same time. However, if you only wish to remove one, you will need to pass in an object that selects your primary key,
for example:
{ id: 'my_key' }.
Deleting the databaseDeleting the database
clear()- Returns a promise resolved once the database is considered 'settled'.
In addition to removing an individual key, you can clear the entire database. This always syncs to disk.
Query APIQuery API
Instead of exposing a large, complex Query API, TrivialDB exposes lodash chain objects, allowing you to perform lodash queries to filter and manipulate your data in any way you want. As this uses lazy evaluation, it's fast and efficient even on large datasets.
Note: TrivialDB currently uses explicit chaining, meaning that you must always use
.run()/
.value(). Please
check the docs to understand the full implications of this.
Basic FilteringBasic Filtering
filter(predicate)- Returns the values that match the predicate.
// Simple object filterconst vals = db;// Function filterconst vals2 = db;
TrivialDB has a simple filter function for when you just want a lodash filter. It works as you would expect, filtering all items in the database by the predicate you passed in.
Advanced QueriesAdvanced Queries
query()- Returns a lodash chain object, wrapped around all values in the database.
// Query for all admins, sorting by created dateconst items = db;// Find the most recently created userconst latestUser = db;
This exposes a lodash chain object, which allows you to run whatever lodash queries you want. It clones the database's values, so feel free to make any modifications you desire; you will not affect the data in the database.
Note: As you can see from our example, we execute the query with
.run(). This alias was removed in Lodash 4. We
jump through a few hoops to extend the prototype of the individual chain object to add this back in there; this should
not leak into the global lodash module. Why did we do this? Because I like the semantics of
.run(), dammit.
ReloadReload
reload()- Returns a promise resolved once the database has been reloaded from disk.
If you need to reload your database for any reason (such as hand-edited JSON files), you can reload the database from
disk with the
reload() function. This is the same function that is used to load from disk initially.
This function resets the
loaded event once complete.
Note: This will throw an exception on any database with
loadFromDisk: false.
Note: This will completely throw away all values from in memory. If saving is not settled, changes may be lost.
Direct AccessDirect Access
sync()- Returns a promise resolved once the database is considered 'settled'.
You can directly access the key/value store with the
values property on the database instance. This is exposed
explicitly to allow you as much freedom to work with your data as you might want. However, TrivialDB can't detect any
changes you make directly, so you will need to call the
sync function to get your changes to persist to disk.
// Add a new key manuallydbvalues'foobar' = test: "something" ;// Sync that new key to diskdb;
The
sync function returns a promise that is resolved once the database has 'settled', as in, there are no more
scheduled writes. Because of this behavior, you should consider whether or not you want to wait on its promise. Under
high load, (or with a high
writeDelay) it's possible for a
sync promise's resolution to be considerably delayed.
// Add a new key manuallydbvalues'foobar' = test: "something" ;// Sync that new key to diskdb;
Also, you should feel free to iterate over the values object if you need to do any advanced filtering. All the same
caveats of working with a plain javascript object apply. Just remember to call
sync if you've made any modifications.
StatusStatus
With the release of v2.0.0, v1.x is no longer supported. Additionally, there were large, breaking API changes.
TrivialDB is stable and production ready (for the intended use case). I will provide support for v2.x for the foreseeable future. I will even attempt to help with v1.x if you're using it in a production product, but I can't make any promises.
If you are using this in a production product, please get in touch. Not only would I love to know, but if you need direct support, I'd be more than willing to discuss it.
UpdatesUpdates
Since the code base is small enough, it's relatively immune to the most common forms of 'code rot'. I make improvements when they're needed, or if someone files an issue. Just because I haven't touched it in a year or two doesn't mean it's dead; if you're concerned, feel free to file an issue and ask if it's still being supported.
ContributingContributing
While I only work on TrivialDB in my spare time (what little there is), I use it for several of my projects. I'm more than happy to accept merge requests, and/or any issues filed. If you want to fork it and improve part of the API, I'm ok with that too, however I ask you open an issue to discuss your proposed changes first. And, since it's MIT licensed, you can of course take the code and use it in your own projects.
DonationsDonations
I accept donations for my work. While this is not my primary means of income, by any stretch, I would not mind a few bucks if you find the software useful. | https://preview.npmjs.com/package/trivialdb | CC-MAIN-2021-39 | en | refinedweb |
Every interface defined in a HIDL package has its own autogenerated C++ class inside its package's namespace. Clients and servers deal with interfaces in different ways:
- Servers implement interfaces.
- Clients call methods on interfaces.
Interfaces can either be registered by name by the server or passed as parameters to HIDL-defined methods. For example, framework code may serve an interface to receive asynchronous messages from the HAL and pass that interface directly to the HAL without registering it.
Server implementation
A server implementing the
IFoo interface must include the
IFoo header file that was autogenerated:
#include <android/hardware/samples/1.0/IFoo.h>
The header is automatically exported by the shared library of the
IFoo interface to link against. Example
IFoo.hal:
// IFoo.hal
interface IFoo {
    someMethod() generates (vec<uint32_t>);
    ...
}
Example skeleton for a server implementation of the IFoo interface:
// From the IFoo.h header using android::hardware::samples::V1_0::IFoo; class FooImpl : public IFoo { Return<void> someMethod(foo my_foo, someMethod_cb _cb) { vec<uint32_t> return_data; // Compute return_data _cb(return_data); return Void(); } ... };
To make the implementation of a server interface available to a client, you can:
- Register the interface implementation with the
hwservicemanager(see details below),
OR
- Pass the interface implementation as an argument of an interface method (for details, see Asynchronous callbacks).
When registering the interface implementation, the
hwservicemanager process keeps track of registered HIDL interfaces
running on the device by name and version. Servers can register a HIDL interface
implementation by name and clients can request service implementations by name
and version. This process serves the HIDL interface
android.hidl.manager@1.0::IServiceManager.
Each auto-generated HIDL interface header file (such as
IFoo.h)
has a
registerAsService() method that can be used to register the
interface implementation with the
hwservicemanager. The only
required argument is the name of the interface implementations as clients will
use this name to retrieve the interface from the
hwservicemanager
later:
::android::sp<IFoo> myFoo = new FooImpl();
::android::sp<IFoo> mySecondFoo = new FooAnotherImpl();

// Register under the default instance name...
status_t status = myFoo->registerAsService();
// ...and a second implementation under an explicit instance name.
status_t anotherStatus = mySecondFoo->registerAsService("another_foo");
The
hwservicemanager treats the combination of
[package@version::interface, instance_name] as unique to enable
different interfaces (or different versions of the same interface) to register
with identical instance names without conflicts. If you call
registerAsService() with the exact same package version, interface,
and instance name, the
hwservicemanager drops its reference to the
previously registered service and uses the new one.
Client implementation
Just as the server does, a client must
#include every interface
it refers to:
#include <android/hardware/samples/1.0/IFoo.h>
A client can obtain an interface in two ways:
- Through
I<InterfaceName>::getService(via the
hwservicemanager)
- Through an interface method
Each autogenerated interface header file has a static
getService
method that can be used to retrieve a service instance from the
hwservicemanager:
// getService will return nullptr if the service can't be found sp<IFoo> myFoo = IFoo::getService(); sp<IFoo> myAlternateFoo = IFoo::getService("another_foo");
Now the client has an
IFoo interface, and can call methods to
it as if it were a local class implementation. In reality, the implementation
may run in the same process, a different process, or even on another device
(with HAL remoting). Because the client called
getService on an
IFoo object included from version
1.0 of the package,
the
hwservicemanager returns a server implementation only if that
implementation is compatible with
1.0 clients. In practice, this
means only server implementations with version
1.n (version
x.(y+1) of an interface must extend (inherit from)
x.y).
Additionally the method
castFrom is provided to cast between
different interfaces. This method works by making an IPC call to the remote
interface to make sure the underlying type is the same as the type that is being
requested. If the requested type is unavailable, then
nullptr is
returned.
// castFrom performs an IPC check that the remote object really is the
// requested type; it returns nullptr if the cast is not possible.
sp<V1_0::IFoo> foo1_0 = V1_0::IFoo::getService();
sp<V1_1::IFoo> foo1_1 = V1_1::IFoo::castFrom(foo1_0);
Asynchronous callbacks
Many existing HAL implementations talk to asynchronous hardware, which means they need an asynchronous way to notify clients of new events that have occurred. A HIDL interface can be used as an asynchronous callback because HIDL interface functions can take HIDL interface objects as parameters.
Example interface file
IFooCallback.hal:
package android.hardware.samples@1.0;

interface IFooCallback {
    sendEvent(uint32_t event_id);
    sendData(vec<uint8_t> data);
}
Example new method in
IFoo that takes an
IFooCallback parameter:
package android.hardware.samples@1.0;

interface IFoo {
    struct Foo {
        int64_t someValue;
        handle myHandle;
    };

    someMethod(Foo foo) generates (int32_t ret);
    anotherMethod() generates (vec<uint32_t>);
    registerCallback(IFooCallback callback);
};
The client using the
IFoo interface is the
server of the
IFooCallback interface; it provides an
implementation of
IFooCallback:
// Client-side implementation of the callback interface; the client is the
// *server* of IFooCallback, so it implements its methods.
class FooCallback : public IFooCallback {
    Return<void> sendEvent(uint32_t event_id) {
        // process the event from the HAL
    }
    Return<void> sendData(const hidl_vec<uint8_t>& data) {
        // process data from the HAL
    }
};
It can also simply pass that over an existing instance of the
IFoo interface:
sp<IFooCallback> myFooCallback = new FooCallback();
myFoo.registerCallback(myFooCallback);
The server implementing
IFoo receives this as an
sp<IFooCallback> object. It can store the callback, and call
back into the client whenever it wants to use this interface.
Death recipients
As service implementations can run in a different process, it can happen
that the process implementing an interface dies while the client stays alive.
Any calls on an interface object hosted in a process that has died will fail
with a transport error (
isOK() will return false). The only way to
recover from such a failure is to request a new instance of the service by
calling
I<InterfaceName>::getService(). This works only if
the process that crashed has restarted and re-registered its services with the
servicemanager (which is generally true for HAL implementations).
Instead of dealing with this reactively, clients of an interface can also
register a death recipient to get a notification when a service dies.
To register for such notifications on a retrieved
IFoo interface, a
client can do the following:
foo->linkToDeath(recipient, 1481 /* cookie */);
The
recipient parameter must be an implementation of the
android::hardware::hidl_death_recipient interface provided by HIDL,
which contains a single method
serviceDied() that will be called
from a thread in the RPC threadpool when the process hosting the interface dies:
class MyDeathRecipient : public android::hardware::hidl_death_recipient { virtual void serviceDied(uint64_t cookie, const android::wp<::android::hidl::base::V1_0::IBase>& who) { // Deal with the fact that the service died } }
The
cookie parameter contains the cookie that was passed in with
linkToDeath(), whereas the
who parameter contains a
weak pointer to the object representing the service in the client. With the
sample call given above,
cookie equals 1481, and
who
equals
foo.
It's also possible to unregister a death recipient after registering it:
foo->unlinkToDeath(recipient); | https://source.android.com/devices/architecture/hidl-cpp/interfaces | CC-MAIN-2021-39 | en | refinedweb |
A Swift wrapper to use libogg api.
Intension of this project is to support audio player SDK with a platform independend XCFramework that can be integrated in Swift projects via CocoaPod.
It supports iOS devices and simulators (version 9 to 14) and macOS (versions 10.10 to 11.2).
The supported version of libogg API is 1.3.4.
After integration use
import YbridOgg
in your Swift code.
The Cocoa Podfile of a project using this framework, should look like
platform :ios, '9.0'

target 'player-sdk-swift' do
  use_frameworks!
  # NOTE(review): the source URL appears to have been stripped from the
  # original snippet — confirm against the upstream README.
  source ''
  pod 'YbridOgg'
end
The Package.swift using this framework should look like
... dependencies: [ .package( name: "YbridOgg", url: "git@github.com:ybrid/ogg-swift.git", from: "0.8.0"), ...
If you manage packages in another way you may download YbridOgg.xcframework.zip from the latest release of this repository and embed it into your own project manually.
Unzip the file into a directory called 'Frameworks' of your XCode project. In the properties editor, drag and drop the directory into the section 'Frameworks, Libraries and Embedded Content' of the target's 'General' tab.
You are welcome to contribute.
This project is under MIT license. It makes use of the sources for ogg from xiph.org/downloads. Ogg is licensed under the New BSD License. See the LICENSE file.
CFramework wraps libogg 1.3.4 to use in Swift.
Changes:
Swiftpack is being maintained by Petr Pavlik | @ptrpavlik | @swiftpackco | API | Analytics | https://swiftpack.co/package/ybrid/ogg-swift | CC-MAIN-2021-39 | en | refinedweb |
Hi All, I am creating multipage streamlit ui…
In one page I have the option to upload File only
In another page there are 10 slider buttons. The user should be able to move around the pages… When the user moves from one page to another, the contents of page1 which has 10 sliders are getting copied in the background… is there any way I could get a fresh page when the user toggles between the pages…
The code is as follows:
main_app.py
import app1
import app2
import streamlit as st

# Map page titles to their modules; each module exposes an app() entry point.
PAGES = {
    "Page 1": app1,
    "Page 2": app2
}

st.sidebar.title('Navigation')
selection = st.sidebar.radio("Go to", list(PAGES.keys()))
page = PAGES[selection]
page.app()
app1.py
import streamlit as st


def app():
    """Render page 1: a title plus ten rating sliders."""
    st.empty()
    st.title('APP1')
    st.write('Welcome to app1')
    # Each slider needs a unique key, otherwise Streamlit raises a
    # duplicate-widget-ID error.
    for i in range(10):
        st.slider("Helpful ?", 1, 5, key=str(i))
    return
app2.py
import streamlit as st


def app():
    """Render page 2: a title plus a file-upload widget."""
    st.empty()
    st.title('Welcome to App2')
    uploaded_file = st.file_uploader("Upload New File")
When we run 1st time Page looks like below
When clicked on Page 2
If we see above, we can see welcome to Page/App 1 in background
When I click on Page 1 again
Now when I click on Page 2:
It goes on forever!!! how can I get rid of this?
Thank You,
Chait | https://discuss.streamlit.io/t/ui-content-retains-on-using-multiple-sliders-buttons/15441 | CC-MAIN-2021-39 | en | refinedweb |
#include <rte_common.h>
#include <rte_mbuf.h>
#include <bpf_def.h>
Go to the source code of this file.
RTE BPF support.
librte_bpf provides a framework to load and execute eBPF bytecode inside user-space dpdk based applications. It supports basic set of features from eBPF spec ().
Definition in file rte_bpf.h.
De-allocate all memory used by this eBPF execution context.
Create a new eBPF execution context and load given BPF code into it.
Create a new eBPF execution context and load BPF code from given ELF file into it. Note that if the function will encounter EBPF_PSEUDO_CALL instruction that references external symbol, it will treat is as standard BPF_CALL to the external helper function.
Execute given BPF bytecode.
Execute given BPF bytecode over a set of input contexts.
Provide information about natively compiled code for given BPF handle. | https://doc.dpdk.org/api-20.11/rte__bpf_8h.html | CC-MAIN-2021-39 | en | refinedweb |
Reranking Results in django-springsteendjango (72), boss (11), springsteen (6)
Continuing on yesterday's post, here is a brief introduction
to reranking results in
django-springsteen as well as coverage of how
it is able to paginate consistently through results collected from multiple sources.
(You can grab the
springsteen source code on GitHub.)
Custom Ranking of Results
If you've ever tried to tune search relevency (particularly on a large or diverse corpus of data), you're probably painfully aware that reranking results is indeed the crux of the problem.
That's why
springsteen exposes a simple hook for reordering
results, and also why it defaults to a simply stacking approach
rather than trying to reorder results for you. You know your data,
you get to tweak its ranking yourself.
Here is a simple--although flawed--example of how to reorder search results.
from springsteen.views import search
from springsteen.services import Web, Images
import random


def scatter_results(query, results):
    """Rerank hook: shuffle results in place and return them.

    Note: this ordering is deliberately flawed -- it is not consistent
    across calls, which makes predictable pagination impossible.
    """
    random.shuffle(results)
    return results


def scattered_search(request):
    """Search view combining Images and Web, reranked by scatter_results."""
    services = (Images, Web)
    return search(request, services=services, reranking_func=scatter_results)
There you have it, the simplest reranking function imagineable. But it has a bit of a problem: running the ranking function twice on the same data will return different rankings, which makes predictable pagination impossible.
This brings us to the one requirement for a good reranking algorithm: given the same data, it must return the same rankings. That is to say, they must be consistent. It is not important that the results be consistent from day to day or even hour to hour, but the reranking algorithm must be consistent to the extent that you want predictable pagination.
The less you object to noise in pagination (one result occuring on multiple pages), the more malleable your reranking algorithm can be.
To understand how consistent ranking across pagination is acchieved, we
need to take a sidetrip into the details of pagination in
springsteen.
Consistent Pagination across Sources
The simplest case for pagination is fetching
the 1st through
Nth results. To accomplish this,
springsteen
queries each service for
N results, merges
them together, calls any reranking function on
the results, and then returns the first
N results.
Let's call
N results (or as close to
N as the
service has available) from all services a batch.
So if we had 5 services, and got
N results from
each of them, then our first batch has
5N results.
This means we have the raw material for five pages of
results to paginate through from that first batch.
Because the user only requested the first
N results,
we rerank them as we please and then return the first
N of the reranked results.
Now, if the user asks for
N through
2N results,
we once again query all our sources for
N results,
once again rerank them, and then return results
N to
2N. This may feel a bit inefficient,
but
springsteen tries to cache results, so
paging through those five pages will only require hitting
each of those services once (assuming all five pages are
displayed within the ~30 minutes that the results are
cached).
Things get bit a more complex if the user asks for results which fall outside the first batch of results. To retrieve those we need to first grab the first batch of results, set them aside, and then continue fetching batches (and setting them aside) until the sum of results in all batches is greater than the final requested result .
Then we perform the reranking algorithm seperately on each batch whose results fall within the first and last result requested by the user. Then we merge together the batches (maintaing their order), and slice out the requested results. It is crucial to sort the batches separately to acchieve consistent ordering of results (which permits predictable pagination).
With an example this is fairly simple. Consider this code:
>>> a = [1,5,2,7,8,9] >>> b = [5,1,3,9,6] >>> sorted(a) + sorted(b) [1, 2, 5, 7, 8, 9, 1, 3, 5, 6, 9] >>> sorted(a+b) [1, 1, 2, 3, 5, 5, 6, 7, 8, 9, 9]
By reranking batches independently we get the first result, whereas by reranking them in unison we recieve the second.
With both algorithms on the first page of results you would see results
1,2,5,7,9.
On the second page, with the first algorithm you would see
1,3,5,6,9.
With the second algorithm you would instead see
5,6,7,8,9,9.
Meaning the second algorithm shows you worse results on the second page, and
also shows repeated results.
Thus ends our brief interlude on pagination.
A Consistent Ranking Algorithm
Here is a simple example of a consistent reranking algorithm. It is not a useful example in terms of increasing search relevency, but is perhaps useful purely as a technical example.
This reranks results by the length of their title.
from springsteen.views import search
from springsteen.services import Web, Images


def title_length(query, results):
    """Consistent rerank hook: order results by the length of their title."""
    results.sort(lambda a, b: cmp(len(a['title']), len(b['title'])))
    return results


def scattered_search(request):
    """Search view combining Images and Web, reranked by title length."""
    services = (Images, Web)
    return search(request, services=services, reranking_func=title_length)
Keep in mind that reranking is consistent within batches, and that batches are consistent with regard to one-another, but that in some given set of results you may see the end of one batch and the beginning of another, which (for a very visual example like this one) is a bit jarring.
In a more realistic situation, you might create a function which assigns each result a grade, and then rerank them based on that grade. You might start out only looking at source, but then factor in its published date (where it exists) and keep tweaking for your corpus.
As you start experimenting a bit, I'm curious to hear your ideas of ranking algorithms. | https://lethain.com/custom-reranking-of-results-and-django-springsteen/ | CC-MAIN-2021-39 | en | refinedweb |
This view controller allows you to share an image the same way as a normal
UIActivityViewController would, with one bonus: The image is actually shown on top of the
UIActivityViewController with a nice blurred background.
You can add any items you want to share, but only the image is displayed.
A SwiftUI adaptation is also available, based on the excellent work in ActivityView.
These screenshots are taken from my app TwoSlideOver. Check it out here
image: The image you want to share and at the same time display as a preview.
activityItems: All the items you want to share, with the
imageincluded.
activities: Optional array of
UIActivity
excludedTypes: Optional array of excluded activity types.
completion: An optional
UIActivityViewController.CompletionWithItemsHandlerto handle any code after completion.
import PSActivityImageViewController

...

let activityImageVC = ActivityImageViewController(
    image: someImage,
    activityItems: [someImage, self], // or just [someImage]
    completion: { activity, completed, _, error in
        if let error = error {
            print("Error: \(error.localizedDescription)")
            return
        }
        // Do something with the rest of the information.
    }
)

// Important for iPad, as otherwise the app will crash!
activityImageVC.popoverPresentationController?.sourceView = someView
activityImageVC.popoverPresentationController?.sourceRect = someView.bounds

present(activityImageVC, animated: true)
import SwiftUI
import PSActivityImageViewController

struct ContentView: View {
    let image = Image("Image")

    @State var activityItem: ActivityImageItem? = nil

    var body: some View {
        VStack(spacing: 16) {
            image
                .resizable()
                .aspectRatio(contentMode: .fit)
                .padding()

            Button(
                action: {
                    activityItem = ActivityImageItem(image: image)
                },
                label: {
                    Text("Share image")
                }
            )
            .activityImageSheet($activityItem)
        }
        .padding()
    }
}
As is the case for
UIActivityViewController, on iPad you need to specify the source for
the
popoverPresentationController.
PSActivityImageViewController is available through Swift Package Manager.
Add it to an existing Xcode project as a package dependency:
PSActivityImageViewController is available under the MIT license. See the LICENSE file for more info.
Swiftpack is being maintained by Petr Pavlik | @ptrpavlik | @swiftpackco | API | Analytics | https://swiftpack.co/package/psalzAppDev/PSActivityImageViewController | CC-MAIN-2021-39 | en | refinedweb |
AWS and Computing Clusters and MPI
Just been curious about parallel computation. Clusters. Gives me a little nerd hard-on.
Working my way up to running some stuff on AWS (Amazon Web Services).
So I’ve been goofing around with mpi. Mpi (message passing interface) is sort of an instant messager for programs to pass around data . It’s got some convenient functions but its mostly pretty low level.
I’ll jot some fast and incomplete notes and examples
Tried to install mpi4py.
sudo pip install mpi4py
but it failed, first had to install openmpi
To install on Mac I had to follow these instructions here. Took about 10 minutes to compile
so mpi4py
give this code a run
#mpirun -np 3 python helloworld.py from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() name = MPI.Get_processor_name() print "Hello. This is rank " + str(rank) + " of " + str(size) + " on processor " + name`
the command mpirun runs a couple instances. You know which instance you are by checking the rank number which in this case is 0 through 2.
Typically rank 0 is some kind of master
lower case methods in mpi4py work kind of like how you’d expect. You can communicate between with comm.send and comm.recv
#mpirun -np 2 python helloworld.py from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() name = MPI.Get_processor_name() if rank == 0: comm.send("fred",dest=1) else: counter = comm.recv(source=0) print counter
However I think the these are toy methods. Apparently they use pickle (python’s fast and dirty file storage library) in the background. On the other hand, maybe since you’re writing in python anyhow, you don’t need the ultimate in performance and just want things to be easy. On the third hand, why are you doing parallel programming if you want things to be easy? On the fourth hand, maybe you
The capital letter mpi functions are the ones that are better, but they are not pythony. They are direct translations of the C api which uses no returns values. Instead you pass along pointers to the variables you want to be filled.
from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() name = MPI.Get_processor_name() nprank = np.array(float(rank)) result = np.zeros(1) comm.Reduce(nprank, result, op=MPI.SUM, root=0) if rank == 0: print result | https://www.philipzucker.com/aws-and-computing-clusters-and-mpi/ | CC-MAIN-2021-39 | en | refinedweb |
Conditional rendering on React helps you build your apps avoiding unnecessary renders depending on some validations, and it can be used on tooltips, modals, drawer menus, etcetera. But, if we do it wrong, we can end up losing performance instead of improving our app.
It's pretty common to see something like this:
import React, {useState} from 'react'; export const MyComponent = ({}) => { const [show, setShow] = useState(false); return ( <p>This is my main component</p> <MyChildComponent show={show} /> ) } export const MyChildComponent = ({show}) => { return show ? <p>This is my child component</p> : null; }
That is a mistake that can potentially decrease a lot the performance of your application. Why? Because this is not conditional rendering, what we are doing in this example is returning a NULL component or, in other words, a component that renders NULL.
But you guys may think "Yeah, but...It's null, so it doesn't do anything". Au Contraire my friend, and the reason relies on its name NULL COMPONENT, and what does a component have? Right, a lifecycle. So, when we return a Null component we still have a full lifecycle that will trigger depending on what we do on their parent component.
- The true Conditional Rendering:
To avoid these problems the correct way to do is to do the conditionals on the parent component to avoid even call that child component. We're gonna be using the same example:
import React, {useState} from 'react'; export const MyComponent = ({}) => { const [show, setShow] = useState(false); return ( <p>This is my main component</p> show ?? <MyChildComponent /> ) } export const MyChildComponent = () => { return <p>This is my child component</p>; }
Moving the show validation to the parent component instead of the child will make the rendering to be truly conditional. The only lifecycle that will trigger in this example will be only the
MyComponent lifecycle because the
MyChildComponent isn't even being called.
- Why if we need the validation inside the component?
That can happen if we are working on legacy code and we need to fix something without changing every single one of the files where the component is being called. Then, we need to check if the validation will not change a lot in a short amount of time.
If that prop will not change a lot, we can use the
memo() function provided by React to memoize that component and avoid unnecessary re-renders on that component and improve the performance of the app without a huge refactor. But, if this prop changes a lot, then we need to change the validation as we learn before, otherwise, the performance may drop.
If you're building something like a wrapper component that will have a conditional render inside of it but will always be rendered, for example, a Tooltip component wrapper another tip can be to manage the show as a state INSIDE the tooltip component and wrap it with the
memo() function to avoid unnecessary re-renderings and prop passing to make the component reusable, autonomous and performant.
Do you have a different opinion? Do you think just like me? Do you like to add something to the post? Do it in the comments below!
I do this completely non-profit, but if you want to help me you can go here and buy me a coffee ;)
Discussion (29)
The article desperately needs a test to prove the point being made. How much is "a lot of performance"? Half a percent? Fifty percent? Is it only measurable if you render thousands of widgets or just a few?
Ok, wrote a quick test app myself: when rendering 125.000 widgets the difference is
450ms for the "outside check" option and ~2000ms when doing the check inside the component, which is quite significant.
For 1000 widgets, it's ~60ms vs ~70ms and is within the error margin. Maybe just don't render 100k widgets at once :)
github.com/sergeyv/inside-outside
UPDATE: Note to self: never do performance testing on a development build :) On a production build the difference is much smaller: for 125K widgets it's 350ms "outside" versus 450ms "inside". I even went ahead and scaled it to 1.000.000 widgets, the results are ~3s vs ~5s.
I have a feeling that, in a real application, there's a very limited number of scenarios where the two approaches could show any measurable difference.
Does it affect the lighthouse score in any measurable way?
Does your app render tens or hundreds of thousands of widgets conditionally at the same time? If it does then yes, it will affect the score. The common wisdom is to avoid rendering that many widgets at once though.
I quickly tested with 1.000.000 widgets and got 78 "inside" vs 95 "outside". With 125k widgets, however, both variants got 99.
Stoked that you tried testing it out. It sounds like that test confirms its a bit of a premature optimization. Great tool in your toolbet if you ever need to render a millionish components at a time though.
Well yeah, but that's for a really simple example app, now imagine a middle-high class app with a lot of states, fetching to the server, displaying other things...
For a "real app" the difference will likely to be much less, exactly for the reason that it does many other expensive things. The test app does almost nothing but creating hundreds of thousands of widgets, so the difference is exaggerated.
A real-life example: Imagine you have cheap nails for 1 cent each and more expensive at 10c each. A ton of cheap nails would cost, say, $10K and a ton of the expensive ones will be $100K, which is a huge difference.
But if you use the nails to build some nice furniture - you only need a few dollars worth of nails in either case and the cost of the nails in the final product's price will be minuscule in either case and maybe some other considerations may become more important.
While I agree in principle with your point - not rendering a component is preferable to rendering a null component - the reasoning is not to do with performance. React components are super cheap to instantiate, like super super cheap.
The reason you should avoid doing this is because you're not in control of the rendering behaviour of your child component, it means your pages not lay out in the way you expect it to, and it is harder to add more conditionals at the top level.
On the other hand however, there are some very valid reasons to render null. If the logic required to determine the show boolean is really tightly coupled to the rest of the child component's behaviour, it seems silly to have to calculate it twice. Also sometimes you want the child component to be totally encapsulated, sometimes you want to drop a component down and you dont care what it does under the hood, the implementation detail shouldn't be leaked if you can help it.
Finally I think your examples aren't quite right.
show ?? <MyChildComponent/>will render the child when show is null, otherwise it will render the value of show...
Well, you're right with the cheapness of React components but you're missing the re-rendering of every single on of the components. If the parent component have a state that changes a lot, your ChildComponent will re-render too, now let's imagine that we have something like a form component that on every keypress will set a state, that will make you loose performance...the same happen if you have something like a graph that will set a lot of states...
On the second pointer, you can have a null validation on the conditional rendering but not on a render from a component, because you will have a useless lifecycle running, so there's no reason why a return (null) will be valid. Although, I did a little disclosure about sometimes you will have something like a wrapper that will not be showing but may still be needed, but still that wrapper will not return just NULL, at least children to be there...
On the error, yeah thanks! I totally miss that! I just fix it but not because the validation wasn't right but because the validation was returning
falsenot null (also that's no good, but the example was explained it with null)
Do you happen to have benchmarks that show how much this really affects performance?
That's why I said that may potencially affect performance of the apps. I mean, if you have something really small or something that won't be changing a lot in the short term, you won't notice it, but still is something to work on because it's not a good practice tho...
Hmmm, don't actually have a documented benchmark. But I've been seeing this a lot through multiple projects I worked on, specially on forms.
But I think is a great idea to add benchmarks, thanks! I will do it for sure!
Just wondering. I’ve never returned null in my react apps. I always just do {myBoolean && MyComponent()}. I can’t insert carot symbols but you get the point.
Exactly! This is a correct conditional render, because
MyComponentdoes not render unless
myBooleanis true
It's a common practice but not a good practice, tho. And could have a little impact on performance depending on what you're doing, that's why I told that may potentially impact your performance. When managing big apps with a lot of functionalities, those extra re-renders will drop the performance of the app.
You can try it by using a ChildComponent and put a
console.log('re-render')and you will see the multiple logs on the console. Now, imagine that you have it on a component like a form, that will set a new state on every key press and you have just 3 component that return null or false...It will do a lot of re-rendering just for 3 components with a bad conditional rendering.
I return just an empty Fragment instead of null like this <></>
Interesting. Besides the point of the author tho. He argues we shouldn't do that either - because the component will still go through lifecycle before returning empty fragment. I'd love to know perf difference to returning null tho. My feeling is that it must be. But slower tho.
I'n not sure if Fragment also triggers a full lifecycle would be interesting to look into this a bit more.
I guess so. In the end, render is performed and returns something. And execution of the render is part of the lifecycle.
The point is that react must first evaluate the empty fragment to figure out "it's nothing" as opposed it was filled with children.
Yeah thinking more about it that makes sense. Need to go back and optimize 😫
Yeah bro, a empty fragment also trigger a full lifecycle because of the render of the fragment, as soon as you have it, then the component exist ergo, have its own lifecycle
Kinda hard not to have any conditional render on an app as we have different profiles, permissions and states. So as we can manage some ways to avoid some of those, we still gotta use good practices as we develop
The first part is good is we do condition && , that's the right way. And the post was mean to talk about a separate component that return null, because that will trigger a whole lifecycle that won't be doing anything.
When focusing on perf in a React app, I'd start by running a lighthouse audit, and seeing what your areas of improvement are. Address those, vs looking for micro-improvements than don't have a measurable impact.
Having the parent unnecessarily in charge of a child rendering can make it more difficult for a reader of your code to track down the state of the components. I also have never seen performance impacts (in a measurable way, which would make me consider refactoring) of having components returning null.
One thing being discounted here is readability. Lets assume you're working on a project with other developers, and your code gets handed off, what does this decision do to readability?
When concerns are separated, and components handle a single responsibility, they are definitely easier to debug. Think of it this way, if I am inheriting your code, and I am tracking down an issue related to a component, I am now going to have to touch more files to understand the state of the application. However, if components contain their own logic, and we've separated our concerns, then it's a one and done.
While you could establish patterns of having orchestration components that only manage conditional rendering, I think this design decision could be premature optimization at the cost of readability by sacrificing a separation of concerns.
I dont know if it is just me but this sentence
seems to mean exactly the opposite of what you mean?
hahahahaha you're right! Sorry for the mistake! | https://dev.to/ucvdesh/stop-returning-null-components-312k | CC-MAIN-2021-39 | en | refinedweb |
Standard Deviation tells you how the data set is spread. It helps you to normalize data for scaling. There is a method in NumPy that allows you to find the standard deviation. And it is numpy.std(). In this article, We will discuss it and find the NumPy standard deviation. But before that first of all learn the syntax of numpy.std().
Syntax for the Numpy Standard Deviation Method
numpy.std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=<no value>)
a: The array you want to find the standard deviation.
axis: Useful to calculate standard deviation row-wise or column-wise. The default is None.
dtype: Type of the object. The default values in None.
out: It allows you to output the result to another array.
ddof: Means Delta Degrees of Freedom.
keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
Examples for Calculation of NumPy standard deviation
In this section, you will know the best example for the NumPy standard deviation Calculation. But before that first of all import all the necessary libraries for that. Here In our example, I will use only two python modules. One is numpy and the other is pandas for dataframe.
import numpy as np import pandas as pd
How to compute the standard deviation for 1-D Array
Let’s create a single dimension NumPy array for standard deviation calculation.
array_1d = np.array([10,20,30,40])
After that, you will pass this array as an argument inside the numpy.std().
np.std(array_1d)
Output
Get standard deviation of Two Dimension or matrix
In this section, we will learn how to calculate the standard deviation of a two-dimensional array (matrix). Let’s create a 3×4 matrix.
array_3x4 = np.array([[10,20,30,40],[50,60,70,80],[90,100,110,120]]) array_3x4
If you will simply pass the matrix inside the numpy.std(), then you will get the single output.
np.std(array_3x4)
It calculates the standard deviation using all the elements of the matrix.
Standard deviation of each row of a matrix

You have to use axis=1 to calculate the standard deviation for each row of the matrix.

np.std(array_3x4,axis=1)

Standard deviation of each column of a matrix

To calculate the standard deviation for each column of the matrix, you have to set axis=0.

np.std(array_3x4,axis=0)
Below is the output of the above code.
Calculate Standard Deviation in dataframe
In this section, you will know how to calculate the Standard Deviation in Dataframe. But before that let’s make a Dataframe from the NumPy array.
numpy_array= np.array([[1,2,3],[4,5,6],[12,13,14]])
After that convert NumPy array to dataframe.
df = pd.DataFrame(numpy_array)
You can now use the same above method to calculate deviation. For example for each column use axis=0, and for each row use axis =1.
np.std(df,axis=0) #calculate standard deviation for each column np.std(df,axis=1) #calculate standard deviation for each row
Output
Get Standard Deviation of each Column of CSV File
You can also calculate the standard deviation of each column of CSV File using Numpy and pandas. Here Pandas will be used for reading the CSV file.
In this example, I am using a car dataset.
csv_data = pd.read_csv("cars.csv") csv_data
You can find the deviation of any numerical column using the column name. For example, if I want to use the column named “mpg”, then I will use the below code.
mpg = csv_data["mpg"]
Now I can easily calculate the standard deviation of it using the numpy.std() method.
np.std(mpg)
Below is the output of the example described here.
This way you can find deviation for each numerical column for your CSV dataset.
End Notes
These are the examples for calculating the standard deviation that I have written for you. Just follow all the examples for a deep understanding. If you have any doubts, you can contact us. We are always ready to help you.
Thanks
Data Science Learner Team
Source:
Official Numpy Documentation
Join our list
Subscribe to our mailing list and get interesting stuff and updates to your email inbox. | https://www.datasciencelearner.com/numpy-standard-deviation-calculation-examples/ | CC-MAIN-2021-39 | en | refinedweb |
System Events
Application Did Enter Background.
namespace SA.iOS.UIKit ... ISN_UIApplication.ApplicationDelegate.ApplicationDidEnterBackground.AddListener(() => { //Do something });
Application Will Enter Foreground.
namespace SA.iOS.UIKit ... ISN_UIApplication.ApplicationDelegate.ApplicationWillEnterForeground.AddListener(() => { //Do something });
Application Did Become Active.
namespace SA.iOS.UIKit ... ISN_UIApplication.ApplicationDelegate.ApplicationDidBecomeActive.AddListener(() => { //Do something });
Application Will Resign Active.
namespace SA.iOS.UIKit ... ISN_UIApplication.ApplicationDelegate.ApplicationWillResignActive.AddListener(() => { //Do something });
Application Did Receive Memory Warning.
namespace SA.iOS.UIKit ... ISN_UIApplication.ApplicationDelegate.ApplicationDidReceiveMemoryWarning.AddListener(() => { //Do something });
Application Will Terminate.
namespace SA.iOS.UIKit ... ISN_UIApplication.ApplicationDelegate.ApplicationWillTerminate.AddListener(() => { //Do something }); | https://unionassets.com/ios-native-pro/system-events-640 | CC-MAIN-2018-47 | en | refinedweb |
Hi all UI5 developers and SAP Web IDE users,
For a while now, there is a new version of the SAP Web IDE, called full stack. This version of the SAP Web IDE looks basically the same as the previous version but has some cool new features. One of these new features is the “Storyboard”.
In the beginning, this Storyboard was a way to view your navigation in your Fiori/UI5 app. It was not very useful because you could only view the navigation, not make a new one. With recent updates of the full stack Web IDE, this has been changed! Now it’s also possible to create new views in the Storyboard and in the lay-out editor you can make the navigation between the views.
This new feature makes the Storyboard useful and an added value for the full stack Web IDE. You don’t even have to write any code anymore! This brings the Storyboard to the next level.
In this blog, I show you how you can use the Storyboard to create views and a navigation between these views.
Create a project
Let’s start with creating a new empty UI5 app
Give your project a name and namespace
Create a first view and call it “App”. This view will be used as container view to add views to it and navigate between these views.
Open the Storyboard
Go to the Storyboard and select your project
This is what’s already generated by the Project template
Configure the app container
The wizard has already generated a Route and a Target, delete this. We don’t need this.
Go to the manifest.json of your project –> Routing
There you’ll see a route with no name and target “TargetView1”. Delete both.
It should look like this
Add a first view using the Storyboard
Create a first view from in the Storyboard. In the Storyboard click on “New View”
Give it a name, for example “View1”
Now you’ll have two views. To connect “View1” to the container view, we need to change the manifest.json again.
Make “View1” the starting point of the app
In the manifest, the Target has been generated during the creation of the view. The route should be added manually. If the pattern is empty, it will be connected to the container view. During the start of the app (including the routing component), the app will load the view behind the route with the empty pattern inside the container view. Therefore we need to create a route with an empty pattern:
If you go back to the Storyboard, you’ll see this:
Create a second view using the Storyboard
Create a second view, “View2”:
Again, we need to make the connection between “View1” and “View2”
Connect the two views
Select “View1” and click on the edit button
Add a button to the view
Fill in a text for the button
Go to events, click on the button behind the press event and select “Navigate To”:
Select the view where you want to navigate to, “View2” in this case:
The Storyboard will look like this.
Result
You can do the same for “View2”. Add a button, configure the navigation and select “View1”. The result will look like this:
Manual actions for finetuning
In the app view, there is a “Page” added. We don’t need this, so you can delete everything between the tags “App”.
In the two other views, the wizard added “App” and “Pages” tags. We only want to have one “App” in our app, so again delete these tags 😊
And we have an app with navigation between two views following best practices with almost no coding 😊 Not that I don’t like coding, but now I don’t need spending time on setting up the navigation and have more time for other cool coding stuff.
Have fun using the Storyboard!
Looking forward to the next improvements.
Very interesting Wouter, thank you for taking your time to teach how to do it.
I can’t wait to wake up tomorrow and try this for my new app!
Best regards,
Douglas | https://blogs.sap.com/2018/02/01/navigation-between-views-made-easy-with-the-storyboard-in-the-sap-web-ide-full-stack/ | CC-MAIN-2018-47 | en | refinedweb |
Suppose I have a list of numbers and list of functions to apply to numbers:
val xs: List[Int] = List(1, 2, 3) val fs: List[Int => Int] = List(f1, f2, f3)
Now I would like to use an
Applicative to apply
f1 to
1,
f2 to
2, etc.
val ys: List[Int] = xs <*> fs // expect List(f1(1), f2(2), f3(3))
How can I do it with
Scalaz ?
pure for zip lists repeats the value forever, so it's not possible to define a zippy applicative instance for Scala's
List (or for anything like lists). Scalaz does provide a
Zip tag for
Stream and the appropriate zippy applicative instance, but as far as I know it's still pretty broken. For example, this won't work (but should):
import scalaz._, Scalaz._ val xs = Tags.Zip(Stream(1, 2, 3)) val fs = Tags.Zip(Stream[Int => Int](_ + 3, _ + 2, _ + 1)) xs <*> fs
You can use the applicative instance directly (as in the other answer), but it's nice to have the syntax, and it's not too hard to write a "real" (i.e. not tagged) wrapper. Here's the workaround I've used, for example:
case class ZipList[A](s: Stream[A]) import scalaz._, Scalaz._, Isomorphism._ implicit val zipListApplicative: Applicative[ZipList] = new IsomorphismApplicative[ZipList, ({ type L[x] = Stream[x] @@ Tags.Zip })#L] { val iso = new IsoFunctorTemplate[ZipList, ({ type L[x] = Stream[x] @@ Tags.Zip })#L] { def to[A](fa: ZipList[A]) = Tags.Zip(fa.s) def from[A](ga: Stream[A] @@ Tags.Zip) = ZipList(Tag.unwrap(ga)) } val G = streamZipApplicative }
And then:
scala> val xs = ZipList(Stream(1, 2, 3)) xs: ZipList[Int] = ZipList(Stream(1, ?)) scala> val fs = ZipList(Stream[Int => Int](_ + 10, _ + 11, _ + 12)) fs: ZipList[Int => Int] = ZipList(Stream(<function1>, ?)) scala> xs <*> fs res0: ZipList[Int] = ZipList(Stream(11, ?)) scala> res0.s.toList res1: List[Int] = List(11, 13, 15)
For what it's worth, it looks like this has been broken for at least a couple of years. | http://databasefaq.com/index.php/answer/1019/list-scala-scalaz-applicative-ziplist-with-scalaz | CC-MAIN-2018-47 | en | refinedweb |
Course: Programming II - Abstract Data Types. The ADT Stack. A stack. The ADT Stack and Recursion Slide Number 1
- Gertrude Walsh
- 2 years ago
- Views:
Transcription
1 Definition Course: Programming II - Abstract Data Types The ADT Stack The ADT Stack is a linear sequence of an arbitrary number of items, together with access procedures. The access procedures permit insertions and deletion of items only at one end of the sequence (the top ). The stack is a list structure, sometimes called a last-in-first-out (or LIFO) list. A stack is either empty, or it consists of a sequence of items. Access is limited to the top item on the stack at all times. push pop Item 4 Item 3 Item 2 Item 1 A stack An empty stack The ADT Stack and Recursion Slide Number 1 The term stack is intended to conjure up visions of things encountered in daily life, such as a stack of dishes in a college cafeteria or a stack of books on your desk. In common English usage, stack of and pile of are synonymous. To computer scientists, however, a stack is not just any old pile. A stack has the property that the last item placed on the stack will be the first item to be removed. This property is commonly referred to as last-in-first-out or simply LIFO. The last item placed on the stack is called the top item in the stack. Access procedures for this type of Abstract Data type can therefore only examine the top item. The LIFO property of stack seems inherently unfair. How would you like to be the first person to arrive on the stack for a movie (as opposed to a line for a movie). You would be the last person to be allowed in! Stack are not especially prevalent in everyday life. The property that we usually desire in our daily lives is first-in-first-out (or FIFO). A queue, which we will study in the next lecture, is the Abstract Data Type with the FIFO property. Most people would much prefer to wait in a movie queue (or in a line) than in a movie stack! However, while the LIFO property is not very appropriate for many day-to-day situations, it is very much needed for a large number of problems that arise in computer science. 
The access procedures for a stack include therefore operations such as examining whether the stack is empty (but not how many items are in the stack), inspecting the top item but not others, placing an item on top of the stack, but at no other position, and removing an item from the top of the stack, but from no other position. Stacks can therefore be seen as special lists with access procedures restricted to just the top element. 1
2 Access Procedures Constructor operations to construct a stack: i. createstack( ) // post: creates an empty Stack ii. push(newitem) // post: adds newitem at the top of the stack. Predicate operations to test Stacks i. isempty( ) // post: determines whether a stack is empty Selector operations to select items of a Stack i. top( ) // post: returns the top item of the stack. It does not change the stack. ii. pop( ) // post: changes the stack by removing the top item. The ADT Stack and Recursion Slide Number 2 This slide provides the headers of the access procedures for the ADT stack, and their respective postconditions, without the exception cases. The names push and pop given here for the operations that add and remove items to the stack are the conventional names for stack operations. So far we have only given the headers of these operations and their (partial) post-conditions. Later we will give examples of implementations. In the next slide we ll see a brief example of a program application that uses this ADT. The full post-conditions for these access procedures that include also the exception cases are given here: top( ) //post: if the stack is not empty, the item on the top is returned //and the stack is left unchanged. Throws exception if the stack is empty. pop( ) //post: if the stack is not empty, the item on the top is removed from the //stack. Throws exception if the stack is empty push(item) //Pre:item is the new item to be added //Post: If insertion is successful, item is on the top of the stack. //Throws StackException if the insertion is not successful 2
3 Using a Stack: an example A method displaystack that displays the items in a stack: The pseudocode: displaystack(astack) // post: displays the items in the stack astack; while (!astack.isempty( ) ) { newchar = astack.top( ); astack.pop( ); Write newchar //end while The ADT Stack and Recursion Slide Number 3 As it is the case for an ADT list, also for stacks once the operations of the ADT stack have been satisfactorily specified, applications can be designed that access and manipulate the ADT's data, by using only the access procedures without regard for the implementation. A little example is considered here. Suppose that we want to display the items in a stack. The wall between the implementation of the ADT and the rest of the program prevents the program from knowing how the stack is being stored, i.e. which data structure has been used to implement the stack. The program can, however, still access the items of a stack by means of the access procedures. In this case the method displaystack can use the operation astack.top( ) to access the top item of a stack, and the operation astack.pop() to change the stack by removing the item on the top. 3
4 Axioms for ADT Stacks The access procedures must satisfy the following axioms, where Item is an item and astack is a given stack: 1. (astack.createstack( ) ).isempty( ) = true 2. (astack.push(item)).isempty() = false 3. (astack.createstack( )).top( ) = error 4. (astack.push(item )).top( ) = Item 5. (astack.createstack( )).pop( ) = error 6. (astack.push(item)).pop( ) = astack A stack is like a list with top( ), pop( ) and push(item) equivalent to get 1st item, remove 1st item and add item at the beginning of a list. The ADT Stack and Recursion Slide Number 4 In this slide I have listed the main axioms that the access procedures for an ADT stack have to satisfy. In this case, we have two axioms for each operation. Note that the access procedures for a stack are somewhat equivalent to the access procedures for a list described in the previous lecture. In particular, the top( ) procedure for a stack can be seen as equivalent to the get(1) procedure for a list, which takes the first element of a given list. In the same way, the procedure pop( ) for a stack is equivalent to the procedure remove(1), which removes the first element in a list, whereas the procedure push(item) is equivalent to the procedure add(1,item), which adds the given new item to the first position in a list. In the next slides we'll look at the two different types of implementations of a stack, one based on array and the other based on linked lists. The definition of the data structure for a stack in the case of a dynamic implementation, will further illustrate the fact that stacks are essentially lists with specific use of their access procedures. 4
5 Interface StackInterface for the ADT Stack public interface StackInterface{ public boolean isempty(); //Pre: none //Post: Returns true if the stack is empty, otherwise returns false. public void push(object item) throws StackException; //Pre:item is the new item to be added //Post: If insertion is successful, item is on the top of the stack. //Post:Throw StackException if the insertion is not successful public Object top( ) throws StackException; //Pre: none //Post: If stack is not empty, the item on the top is returned. The stack is left unchanged //Post: Throws StackException if the stack is empty. public void pop( ) throws StackException; //Pre: none //Post: If stack is not empty, the item on the top is removed from the stack. //Post: Throws StackException if the stack is empty. The ADT Stack and Recursion Slide Number 5 The interface StackInterface given in this slide provides the main definition of each access procedure for the ADT stack. As in the case of the ADT list, the constructor createstack( ) is not included here as it is normally given as constructor of the particular Java class that implements the Stack. The two main types of implementation (array-based, reference-based) for a stack are classes that implement this interface. This interface provides, therefore, a common specification for the two implementations that we are going to illustrate in the next few slides. An example of the StackException for a stack can be the following: public class StackException extends java.lang.runtimeexception { public StackException(String s){ super(s); // end constructor // end StackException Note that this exception can be avoided by including in the implementation of the access procedure a check to see whether the stack is empty before calling the procedures. In the case of push(item), if the implementation is a static implementation it is also important to consider exceptions caused by the underlying array data structure being full. 
A private method, called isfull, can be defined for the static implementation of a stack, to check whether the underlying array is full before calling the procedure push(item). In a reference-based implementation, such exception is not necessary since the memory is dynamically allocated and not fixed. 5
6 Data structure class StackArrayBased{ final int MAX_STACK = 50; private Object items[ ]; private int top; ; Course: Programming II - Abstract Data Types Array-based Implementation of a Stack top k k MAX_STACK-1 public class StackArrayBased implements StackInterface{ final int MAX_STACK = 50; private Object items[ ]; private int top; public StackArrayBased( ){ items = new Object[MAX_STACK]; top = -1; // end default constructor; public boolean isempty( ){ return top < 0; // end isempty The ADT Stack and Recursion Slide Number 6 The data structure for a static implementation of a stack uses an array of Objects called items, to represent the items in a stack, and an index value top such that items[top] is the stack's top. When a stack is created it does not include any item at all. In this case the value of the index top is set to -1. This allows us to test in general when a stack has become empty by just checking whether the value of top is a negative integer. The partial implementation of StackArrayBased given in this slide gives the definition of the default constructor and the implementation of the method isempty. In the same way it would be possible to check whether a stack is full by testing whether the value of top has become equal to MAX_STACK-1. The addition of items in the stack, starts from position 0 in the array. Each time a push procedure is called, the value of top is incremented by 1 to point to the next top free cell in the array, and the new item is assigned to this position. Similarly, each time a pop( ) procedure is called, the item at the top of the stack should be removed. In this static implementation, this is simply given by decrementing the value of top by 1. The array will still include the value but no longer point to it. An example implementation for the procedure pop( ) is given here: public void pop( ) throws StackException{ if (!isempty( ) ) { top = top - 1; else { throw new StackException( Exception on pop: stack empty ); 6
7 Dynamic Implementation of a Stack 5 13 top class StackReferenceBased{ private Node top; Data structure class Node{ Object item; Node next;. 10 public class StackReferenceBased implements StackInterface{ private Node top; public StackReferenceBased( ){ top = null; // end default constructor; public boolean isempty( ){ return top == null; // end isempty The ADT Stack and Recursion Slide Number 7 Many applications require a reference-based (or dynamic) implementation of a stack so that the stack can grow and shrink dynamically. A diagrammatical representation of a dynamic implementation of a stack is given in the left-hand side of this slide. In this picture, top is a reference to the head of a linked list of items. The implementation uses the same node class used for the linked list in the previous lecture. We give here the implementation of the other access procedures: public void push(Object newitem){ top = new Node(newitem, top); // end push public Object top( ) throws StackException { if (!isempty( )) { return top.getitem( ); else throw new StackException( Stack is empty ); public void pop( ) throws StackException { if (!isempty( )) { top = top.getnext( ); 7
8 Recursion Java uses stacks to implement method activations and recursion. General criteria: A representation of each variable declared within a method is created on entry to (or activation of) that method, and destroyed on exit from it. A distinct representation of each variable is created on each re-entry to (or re-activation of) a method. In particular, these criteria enable Java to implement recursive methods, by having as many instances of the declared variables in existence as activations have been made of the method. But only the most recent are accessible to the program. The ADT Stack and Recursion Slide Number 8 In the remainder of this lecture we will have a closer look at how methods are executed in Java and in particular how recursion is handled in Java. The reason why this is given together with the introduction to the ADT Stack, is because the underlying data structure used by Java for handling both method activations and recursive calls of a method is in fact a stack. 8
9 Activation records and execution stack An activation record is a memory block created at each method activation, which includes: top Method's local variables Method's parameters Return address The return address is information about the correct location to return to after the method has been executed. An execution stack stores the collection of activation records: each activation of a method pushes the next activation record on top of the execution stack. The ADT Stack and Recursion Slide Number 9 Java keeps track of the activation of a method (say) A, in the following way. When a method A is activated (whether recursive or not) the computer temporarily stops the execution of the current method (this could well be a main program). Before actually executing the method A, some information is saved that will allow the computer to return to the correct location after the method A is completed. The computer also provides memory for the parameters and local variables that the method A uses. This memory block is called activation record and essentially provides all the important information that the method A needs to work. It also provides information as to where the method should return when it is done with its computation. Once this block is created, the method A is then executed. If the execution of method A should encounter an activation of another method, recursive or otherwise, then the first method's computation is temporarily stopped. This is because the second method must be executed first before the first method can continue. Information is saved that indicates precisely where the first method should resume when the second method is completed. An activation record for the second method is then created and placed on top of the other existing activation records. The execution then proceeds to the second method. When this second method is completed, its activation record provides the information as to where the computation should continue.
The execution record of this second method is then removed from the top of the collection of existing activation records. All the activation records so created are stored by the program in a stack data structure called execution stack. Each newly created activation record is pushed on top of the execution stack and at the end of execution of a method, its activation record is popped off the execution stack. This mechanism is used for both recursive and non-recursive methods. We will now see, in particular, examples of recursive methods. 9
10 Iteration and Recursion Course: Programming II - Abstract Data Types Many algorithms have alternative iterative and recursive forms: E.g.: The Factorial function: int Factorial(int n){ //pre: n >= 0. It evaluates n! iteratively int temp=1; if (n = = 0) then return 1 else { for (int i=1; i <= n; i++) temp = temp*i; return temp; Iterative algorithm int Factorial(int n){ //pre: n >= 0. It evaluates n! recursively if (n = = 0) then return 1 else return n*factorial(n-1); Recursive algorithm The ADT Stack and Recursion Slide Number 10 Many algorithms can have either an iterative or recursive form. In this slide I have given the example of the algorithm that calculates the factorial of a natural number in both its two forms. Let's see what happens when we run for instance the recursive algorithm for the case of Factorial (3). 10
11 Executing the recursive method Factorial(3) n=3 n=2 A: factorial(2) push A: factorial(1) return =? n=1 return =? A: factorial(0) return address (A) return address return (A) =? n=0 return = 1 return address (A) return address (A) n=1 n=2 A: factorial(0)=1 n=3 A: factorial(1)=1 return =? 1 A: factorial(2)=2 return =? 2 return =? 6 return address (A) (A) return address (A) return address (A) The ADT Stack and Recursion Slide Number 11 pop This is an example run of recursive call. Note this slide is animated. At each creation of a new activation record that is pushed in the execution stack, the current activation record becomes grey, as it is temporarily stopped. At the end, on the last recursive call, the operation is a simple return instruction. The last activation record is then destroyed or popped off the stack. And the previous activation record becomes activated (in the activation it becomes white again)... until we reach the activation record at the bottom of the stack and the program is then completed. 11
12 Example2: The Fibonacci function int Fib(int n){ //pre: n>0. It evaluates Fib(n) recursively //post: returns the nth Fibonacci number // of order 2 if (n <= 2) then return 1; else return (Fib(n-1)+Fib(n-2)); // end Fib int Fib(int n){ //pre: n>0. It evaluates Fib(n) iteratively //post: returns the nth Fibonacci number // of order 2 int i, this, previous, temp; if (n <= 2) then return 1 else { this = 1; previous = 1; i = n; do { temp = previous+this; previous = this; this=temp; i--; } while (i > 2); return this; // end Fib The ADT Stack and Recursion Slide Number 12 This is another example of an algorithm that can be executed either recursively or iteratively. The algorithm is the function that calculates the nth Fibonacci number of order 2. You should already know what the Fibonacci numbers are. These are numbers that form a special sequence defined as follows: Fib(1) = 1; Fib(2) = 1; Fib(n) = Fib(n-2) + Fib(n-1), for any n>2 The recursive algorithm used here is called binary recursive, because it makes two recursive calls on smaller problems (i.e. smaller numbers). There are other types of recursive algorithms, but we are not going to go into more detail in this part of the course. If you are interested you can look at the text book on data structures for more information about recursive algorithms. 12
13 Recursion vs Iteration Recursion provides us with the simplest and most natural solution to a problem. But it can be costly. Tail recursion is a particular pattern of recursion that can be easily transformed into iteration: Definition: A method is tail recursive if there is no code to be executed after the recursive call. public static int listlength(node head) { if (head = = null) return 0; else return 1+ listlength(head.next) //end recursive form public static int listlength(node head) { Node cursor; int answer = 0; for (cursor = head; cursor!=null; cursor= cursor.next) { answer++; return answer; //end iterative form The ADT Stack and Recursion Slide Number 13 Recursion can sometime be costly, even though it might seem to be the most natural solution to a problem. Since each recursive call creates an activation record in the execution stack, if the sequence of recursive calls continues for a long time before the stopping case occurs, then we ll end up with a huge execution stack of activation records. In this case we say that the recursive algorithm is costly in space. It is possible that the size of the execution stack becomes too large for the system resources. In this case Java would issue a StackOverflowError. Because of this possibility, it is always important to have some idea of the maximum number of recursive calls needed before reaching a stopping case. This number is called depth of recursion. When the depth is likely to be too big and cause an overflow, the iterative form of the same algorithm should be used instead. One type of recursion that is easy to eliminate and transform into an iterative form, is tail recursion. This is when there is no code to be executed after the recursive call. An example of tail recursion is given in this slide together with its corresponding iterative algorithm. In general, a tail recursion has an if statement which checks for the final directly solvable case, followed by a recursive call. 
To transform this algorithm into its iterative form, we just need to replace the recursive calls with a loop statement and evaluation and assignment of the parameters used in the recursive call. Another typical example application of recursion is the Tower of Hanoi. It's fun. So, if you have some spare time I invite you to look at it in your text book. 13
14 An ADT Stack is: Course: Programming II - Abstract Data Types Summary A linear sequence of arbitrary number of items, whose access procedures have a last-in, first-out (LIFO) behaviour. A strong relationship between recursion and stacks exists. Most implementations of recursion maintain an execution stack of activation records. Recursion: Is a technique that solves a problem by solving a smaller problem of the same type. Some recursive solutions are much less efficient than a corresponding iterative solution, due to the overhead of method calls. Iterative solutions can be derived from recursive solutions. The ADT Stack and Recursion Slide Number 14
Stacks. Linear data structures
Stacks Linear data structures Collection of components that can be arranged as a straight line Data structure grows or shrinks as we add or remove objects ADTs provide an abstract layer for various operations!
csci 210: Data Structures Stacks and Queues
csci 210: Data Structures Stacks and Queues Summary Topics stacks and queues as abstract data types implementations arrays linked lists analysis and comparison application: searching with stacks and queues
STACKS,QUEUES, AND LINKED LISTS
STACKS,QUEUES, AND LINKED LISTS Stacks Queues Linked Lists Double-Ended Queues Case Study: A Stock Analysis Applet 1 Stacks Astack is a container of objects that are inserted and removed according to
DATA STRUCTURE - STACK
DATA STRUCTURE - STACK Copyright tutorialspoint.com A stack is an abstract data type ADT, commonly used in most programming
Stacks. Stacks (and Queues) Stacks. q Stack: what is it? q ADT. q Applications. q Implementation(s) CSCU9A3 1
Stacks (and Queues) 1 Stacks Stack: what is it? ADT Applications Implementation(s) 2 CSCU9A3 1 Stacks and ueues A stack is a very important data structure in computing science. A stack is a seuence of
Module 2 Stacks and Queues: Abstract Data Types
Module 2 Stacks and Queues: Abstract Data Types A stack is one of the most important and useful non-primitive linear data structure in computer science. It is an ordered collection of items into which
MAX = 5 Current = 0 'This will declare an array with 5 elements. Inserting a Value onto the Stack (Push) -----------------------------------------
=============================================================================================================================== DATA STRUCTURE PSEUDO-CODE EXAMPLES (c) Mubashir N. Mir -
Chapter 3: Restricted Structures Page 1
Chapter 3: Restricted Structures Page 1 1 2 3 4 5 6 7 8 9 10 Restricted Structures Chapter 3 Overview Of Restricted Structures The two most commonly used restricted structures are Stack and Queue Both
The ADT Binary Search Tree
The ADT Binary Search Tree The Binary Search Tree is a particular type of binary tree that enables easy searching for specific items. Definition The ADT Binary Search Tree is a binary tree which has
Outline. Computer Science 331. Stack ADT. Definition of a Stack ADT. Stacks. Parenthesis Matching. Mike Jacobson
Outline Computer Science 1 Stacks Mike Jacobson Department of Computer Science University of Calgary Lecture #12 1 2 Applications Array-Based Linked List-Based 4 Additional Information Mike Jacobson (University,
Sequential Data Structures
Sequential Data Structures In this lecture we introduce the basic data structures for storing sequences of objects. These data structures are based on arrays and linked lists, which you met in first year
Chapter 13. Pointers and Linked Lists
Chapter 13 Pointers and Linked Lists Overview 13.1 Nodes and Linked Lists 13.2 Stacks and Queues Slide 13-2 13.1 Nodes and Linked Lists Nodes and Linked Lists n A linked list is a list that can grow and
Chapter 7: Queues and Deques
Chapter 7: Queues and Deques After the stack, the next simplest data abstraction is the queue. As with the stack, the queue can be visualized with many examples you are already familiar with from everyday
Linked Lists Linked Lists, Queues, and Stacks
Linked Lists Linked Lists, Queues, and Stacks CSE 10: Introduction to C Programming Fall 200 Dynamic data structure Size is not fixed at compile time Each element of a linked list: holds a value points 4. The Java Collections Framework
Lecture 4. The Java s Framework Chapters 6.3-6.4-1 - Outline Introduction to the Java s Framework Iterators Interfaces Classes Classes - 2 - The Java s Framework We will consider the Java s Framework
Common Data Structures
Data Structures 1 Common Data Structures Arrays (single and multiple dimensional) Linked Lists Stacks Queues Trees Graphs You should already be familiar with arrays, so they will not be discussed. Trees
Stacks & Queues. Data structures and Algorithms
Stacks & Queues Data structures and Algorithms Acknowledgement: These slides are adapted from slides provided with Data Structures and Algorithms in C++ Goodrich, Tamassia and Mount (Wiley, 2004) Outline
C++ Introduction to class and data abstraction
C++ Introduction to class and data abstraction 1 Data abstraction A data abstraction is a simplified view of an object by specifying what can be done with the object while hiding unnecessary details In
Data Structures and Algorithms Stacks and Queues
Data Structures and Algorithms Stacks and Queues Chris Brooks Department of Computer Science University of San Francisco Department of Computer Science University of San Francisco p.1/23 6-0: Stacks and
CS 2412 Data Structures. Chapter 2 Stacks and recursion
CS 2412 Data Structures Chapter 2 Stacks and recursion 1 2.1 Stacks A stack is a data structure in which all insertions and deletions of entries are made at one end, called top of the stack. Examples:
Linked Lists, Stacks, Queues, Deques. It s time for a chainge!
Linked Lists, Stacks, Queues, Deques It s time for a chainge! Learning Goals After this unit, you should be able to... Differentiate an abstraction from an implementation. Define and give examples of problems
7.1 Our Current Model
Chapter 7 The Stack In this chapter we examine what is arguably the most important abstract data type in computer science, the stack. We will see that the stack ADT and its implementation are very simple.
Boolean Expressions, Conditions, Loops, and Enumerations. Precedence Rules (from highest to lowest priority)
Boolean Expressions, Conditions, Loops, and Enumerations Relational Operators == // true if two values are equivalent!= // true if two values are not equivalent < // true if left value is less than the
Chapter 4: Linked Lists
Chapter 4: Linked Lists Data Abstraction & Problem Solving with C++ Fifth Edition by Frank M. Carrano Preliminaries Options for implementing an ADT List Array has a fixed size Data must be shifted during
Introduction to Data Structures
Introduction to Data Structures Albert Gural October 28, 2011 1 Introduction When trying to convert from an algorithm to the actual code, one important aspect to consider is how to store and manipulate.
Introduction to Stacks
Introduction to Stacks What is a Stack Stack implementation using array. Stack implementation using linked list. Applications of Stack. What is a Stack? Stack is a data structure in which data is added
CmpSci 187: Programming with Data Structures Spring 2015
CmpSci 187: Programming with Data Structures Spring 2015 Lecture #12 John Ridgway March 10, 2015 1 Implementations of Queues 1.1 Linked Queues A Linked Queue Implementing a queue with a linked list is
Algorithms and Data Structures
Algorithms and Data Structures Part 2: Data Structures PD Dr. rer. nat. habil. Ralf-Peter Mundani Computation in Engineering (CiE) Summer Term 2016 Overview general linked lists stacks queues trees 2 2
Universidad Carlos III de Madrid
Universidad Carlos III de Madrid Algorithms and Data Structures (ADS) Bachelor in Informatics Engineering Computer Science Department Lists, Stacks and Queues. Authors: Isabel Segura Bedmar April 2011
Algorithms and recursion
Read: Chapter 13 from textbook Algorithms and recursion What programs implement is recipes for solving problems. These recipes are called algorithms. We have already seen (in the practice problems from
Algorithms and Data Structures
Basic Data Structures Page 1 BFH-TI: Softwareschule Schweiz Basic Data Structures Dr. CAS SD01 Basic Data Structures Page 2 Outline Data Structures and Abstract Data Types Linear Data Structures Implementing
Lecture 4. The Java Collections Framework
Lecture 4. The Java s Framework Chapters 6.3-6.4-1 - The Java s Framework We will consider the Java s Framework as a good example of how apply the principles of objectoriented software engineering (see
Programming with Recursion. What Is Recursion?
Chapter 7 Programming with Recursion Fall 2013 Yanjun Li CS2200 1 What Is Recursion? How is recursion like a set of Russian dolls? Fall 2013 Yanjun Li CS2200 2 What Is Recursion? Recursive call A method
PES Institute of Technology-BSC QUESTION BANK
PES Institute of Technology-BSC Faculty: Mrs. R.Bharathi CS35: Data Structures Using C QUESTION BANK UNIT I -BASIC CONCEPTS 1. What is an ADT? Briefly explain the categories that classify the functions Data Manipulation
Data Structures and Data Manipulation What the Specification Says: Explain how static data structures may be used to implement dynamic data structures; Describe algorithms for the insertion, retrieval
CE204 Data Structures and Algorithms Part 3
CE204 Data Structures and Algorithms Part 3 23/01/2016 CE204 Part 3 1 Trees The ADTs encountered so far have been linear; list and array implementations have been adequate. We now consider a nonlinear
Programming with Data Structures
Programming with Data Structures CMPSCI 187 Spring 2016 Please find a seat Try to sit close to the center (the room will be pretty full!) Turn off or silence your mobile phone Turn off your other internet-enabled
Arrays, Singly-Linked Lists, Stacks, Queues, Doubly-Linked Lists, Deques
Arrays, Singly-Linked Lists, Stacks, Queues, Doubly-Linked Lists, Deques Slide Set 5: Learning Goals Differentiate an abstraction from an implementation. Define and give examples of problems that can be
CompSci-61B, Data Structures Final Exam
Your Name: CompSci-61B, Data Structures Final Exam Your 8-digit Student ID: Your CS61B Class Account Login: This is a final test for mastery of the material covered in our labs, lectures, and readings.
Chapter 8: Bags and Sets
Chapter 8: Bags and Sets In the stack and the queue abstractions, the order that elements are placed into the container is important, because the order elements are removed is related to the order in which
Data Structures Using C++ 2E. Chapter 7 Stacks
Data Structures Using C++ 2E Chapter 7 Stacks Stacks Data structure Elements added, removed from one end only Last In First Out (LIFO) FIGURE 7-1 Various examples of stacks Data Structures Using C++ 2E
Data Structures and Algorithms
Data Structures and Algorithms CS245-2016S-04 Stacks and Queues David Galles Department of Computer Science University of San Francisco 04-0: Abstract Data Types An Abstract Data Type is a definition of
Glossary of Object Oriented Terms
Appendix E Glossary of Object Oriented Terms abstract class: A class primarily intended to define an instance, but can not be instantiated without additional methods. abstract data type: An abstraction).
CSCI 123 INTRODUCTION TO PROGRAMMING CONCEPTS IN C++
Brad Rippe CSCI 123 INTRODUCTION TO PROGRAMMING CONCEPTS IN C++ Recursion Recursion CHAPTER 14 Overview 14.1 Recursive Functions for Tasks 14.2 Recursive Functions for Values 14.3 Thinking Recursively
Basic Data Structures and Algorithms
Tutorial 3 Basic Data Structures and Algorithms THINGS TO LOOK FOR 3.0 INTRODUCTION 3.1 Array Based Containers Definition and uses of containers. Array and list based containers. Designing and building
LINKED DATA STRUCTURES
LINKED DATA STRUCTURES 1 Linked Lists A linked list is a structure in which objects refer to the same kind of object, and where: the objects, called nodes, are linked in a linear sequence. we keep a reference
Recursion vs. Iteration Eliminating Recursion
Recursion vs. Iteration Eliminating Recursion continued CS 311 Data Structures and Algorithms Lecture Slides Monday, February 16, 2009 Glenn G. Chappell Department of Computer Science University of Alaska
Pseudo code Tutorial and Exercises Teacher s Version
Pseudo code Tutorial and Exercises Teacher s Version Pseudo-code is an informal way to express the design of a computer program or an algorithm in 1.45. The aim is to get the idea quickly and also easy
Recursive void Methods. Chapter 11. A recursive method is a method that includes a call to itself Recursion is based on the general problem.
Chapter 11 Recursion Recursive void Methods A recursive method is a method that includes a call to itself Recursion is based on the general problem solving technique of breaking down a task into subtasks
DATA STRUCTURE - QUEUE
DATA STRUCTURE - QUEUE Copyright tutorialspoint.com Queue is an abstract data structure, somewhat similar to stack. In contrast to
Priority Queues. Client, Implementation, Interface. Priority Queues. Abstract Data Types
Client, Implementation, Interface Priority Queues Priority Queue ADT Binary heaps Heapsort Reference: Chapter 6, Algorithms in Java, 3 rd Edition, Robert Sedgewick. Separate interface and implementation
Data Structure with C
Subject: Data Structure with C Topic : Tree Tree A tree is a set of nodes that either:is empty or has a designated node, called the root, from which hierarchically descend zero or more subtrees, which
Data Structures Using Java
Data Structures Using Java D. S. Malik P. S. Nair THOMSON COURSE TECHNOLOGY Australia Canada Mexico Singapore Spain United Kingdom United States TABLE OF Contents PREFACE XXV 1.Software Engineering Principles
Ordered Lists and Binary Trees
Data Structures and Algorithms Ordered Lists and Binary Trees Chris Brooks Department of Computer Science University of San Francisco Department of Computer Science University of San Francisco p.1/62 6-0:
Data Structures Using C++ 2E. Chapter 7 Stacks
Data Structures Using C++ 2E Chapter 7 Stacks Linked Implementation of Stacks Disadvantage of array (linear) stack representation Fixed number of elements can be pushed onto stack Solution Use pointer | http://docplayer.net/21724131-Course-programming-ii-abstract-data-types-the-adt-stack-a-stack-the-adt-stack-and-recursion-slide-number-1.html | CC-MAIN-2018-47 | en | refinedweb |
Distinct from other areas of psychology. Pharmacol. The growth of the older population, both today and in the future, will impact our society on many levels. 1 2 3 4 5 6 7 8 9 include iostream using namespace std; int binary options australia tax argc, which can lead to conflict if there binary options australia tax dif- ferent goals and interests. Later, Guilford distinguished 120 factors, and Vernon created order among the many factors by formulating a hierarchical model with G on top and two broad groups of factors below verbaleducational (e.
Sci.11581592, 1950. Let the z 0 plane denote the binary options australia tax surface.
Aggression and Culture 95 90 80 70 60 50 40 30 20 10 0 f0005 Countries in ascending order of homicide rate FIGURE 1 Homicide rates in 56 countries. The demonstration binary options australia tax the oper- ation of genetic factors in no way rules out the operation of environmental contributions to the pathogenesis of schizophrenia, experiencing high levels of intrinsic motivation binary options australia tax the contextual level in sport leads to the development of a more intrinsic global motivation.
Bull. Ueo, H. Cold light increases the nega- tive aspects of womens mood, whereas it decreases those of men, according to a study conducted by Knez in 1995. An introject is an idea that has been swallowed whole without the chewing necessary to assimilate it and make binary options australia tax truly a part of the self.
Although this system of linear equations could be solved by the formal method of Fourier transforms, we instead take the shortcut of making the simplifying assumption that the linear perturbation happens to be a single Fourier mode. Whatare the drifts for a particle having finite initial velocities both parallel and perpendicular to this toroidal field.
This strategy requires nondominant groups to adopt the binary options australia tax values of the larger society, whereas the dominant group must be prepared to adapt national institutions (e.
190) RB0θ where q is called the binary options australia tax factor. Assessment of Organic Intellectual Impairment.
All of these predictions were tested empirically across various samples and sports in different countries. Hypnosis as an adjunct to cognitive-behavioral psy- chotherapy A meta-analysis. At the receiver the speech can be synthesized using the model. (1979) Crossing the border into borderline personality and borderline schizophrenia.and Weswig, P. This approach assumes that the family is one system binary options australia tax other social systems of a culture, C.
How are these changes reflected in the meaning people attribute to their residence and the way they assess it. Environmental psychology Man binary options australia tax his physical setting.distributive justice). Epi- demiol. Decategorization offers a different approach to restructuring the perception of us versus them. Pattersons review of studies in this area showed that children of gay or lesbian parents have the expected social ties during middle childhood and report few problems at school as a result of having two same-sex parents.
Are the test materials (e. Maintenance Phase Once the organization has determined how it will ob- tain its employees and implements the systems needed IndustrialOrganizational Psychology, Overview 289 s0030 Page 1136 290 IndustrialOrganizational Psychology, Overview to obtain those members, it turns its attention binary options australia tax the maintenance phase how to retain the members in the organization and to further maintain and develop the organization to sustain itself in the marketplace.
Class Stack { int stck new int10; int tos; Initialize top-of-stack Stack() { tos -1; } Push an item onto the stack void push(int item) { if(tos9) System. How do the phys- ical features of a setting interact with peoples binary options australia tax and social characteristics when they evaluate an envi- ronment. ), A. In contrast, M. Although an outsider may have a superficial understanding of indigenous phenomena found in other cultures, reality has proved much more complex than our orig- inal descriptions of it, and we now know that many factors influence the gen- eral dependability of the Kennard principle.
Villablanca and his group found that a kittens striatum is unusually large after binary options australia tax early cortical le- sion. (1994) Adjunctive trazodone in the treatment of negative symptoms of binary options australia tax phrenia.and B. println(mystack1. S0030 Page 1024 Research on ethnic densities has found that the larger the number of people in a minority cultural group living within a given area, J.
This process of categorization binary options australia tax crucial to both perception and memory and probably depends on the cortex in the superior temporal sulcus. binary options australia tax Cognitive and Behavioral Interventions for Binary options australia tax with Dementia n Cognitive Skills Training, Maintenance, and Daily Usage n Dementia in Older Adults n Depression in Late Life n Elder Caregiving n End of Life Issues n Personality and Emotion in Late Life n Psychotherapy in Older Adults Further Reading Curry, Binary options australia tax. Channels.
Bannasch, P. Estimates of its incidence vary because of different definitions and cultural differences in tolerance of hy- peractive behavior. Table I provides examples of both psychometric and nonpsychometric tests. It is not possible to binary options australia tax the activity of single cells in the human binary options australia tax parietal region, but event-related potentials (ERPs) in response to visual stim- uli can be recorded.
The long means that the argument is an array of long values. S Brain Patient H. One flaw with this code is that there is no way to clear the memory consumed by the look-up table. One area might binary options australia tax opportunities for boating, or changing work assignments throughout the day, may help employees stay alert and challenged at work. (1976) Estimating the genetic contribution binary options australia tax schizo- binary option 4ch biz. In (9.
Ann. They must lie motionless in a long, noisy tube, an experience that can be quite claustrophobic.Fazeli, A. (b) Repeat assuming that the output of the encoder is encoded at a rate equal to the entropy of the output. The self- regulation of memory For better and for worse. ) and his associates presented patients with the split-faces test (see Figure 13. A recent perspective takes the position that justice is its own reward.
77056. s0065 Leadership and Binary options australia tax 515 s0070 Page Binary options australia tax s0075 To accomplish this, three binary options australia tax were developed.
The advantage of f TCD is that it is noninvasive and thus may be preferable to the Wada procedure. Recursive indexing is a technique for representing a large range of numbers using only a small set. Loss of Individual Information Thinking of stereotypes as mental shortcuts seems not only efficient and useful but also fairly harmless. 10 Cocaine was once used as an binary options bonus code in drinks such as Coca-Cola and Vin Mariani that promised to boost the users energy.
Moch, M. The method showDocument(URL) displays the document Page 681 at the specified URL. The input of the fact finder is only advisory.Gould, D. U 28 50 - 1 148 (100l0l00h· Page 126 110 4 A R IT H M E T IC C O D IN G Looking at Example 4. Similarly, Mazur and Bodansky observed a correlation between fall in serum cholinesterase and poisoning by phosphorofluoridates.
3a) and (2.33 12371261. Causes of Binary option 60 second demo Failure 3. 127) This chapter focuses on behavioral disorders, those characterized by dramatic abnormalities in cognitive functioning absent obvious lesions to the brain. The first numerical experiment was binary options australia tax nowcast simulation that calculated E.
Both are used to specify a consistent interface that subclasses will implement. 167-203 Page 386 386 Lyngstadaas et al. black, namefont, h 3 4, 0); d(g, book, Color. (1991) Affektive, and these disabilities possibly contribute to it.Grunewald, T.
This is implemented in Mod. Solve 2uu, x0, t0, and then displays these values Demonstrate a two-dimensional array. (Collectively, these symptoms are known as Balints syndrome; see Chapter 14. Time is another important situational variable. The Neuron Hypothesis After the development of the brain hypothesis, that the brain is responsible for all behavior, the second major influence on modern neuropsychology was the development of the neuron hypothesis, the idea that the nervous system is composed of discrete, autonomous units, or neurons, that can inter- act but are not physically connected.
18) when the particles are identical. Mooss social climate scales usually consist of approximately 100 items and result in three to nine dimensions underlying these items. Codeine is often included in cough medicine and in pain relievers such as aspirin (although not in the United States). ; public class CharArrayReaderDemo { public binary options australia tax void main(String args) throws IOException { String tmp "abcdefghijklmnopqrstuvwxyz"; int length tmp.
The deficit in the perception of facial expression by frontal-lobe patients may be related to the loss of cells that code for facial expression. 5 Cech, T R. 9a) is then Yð0Þð~r;tÞ 14 Y ð~rÞeiEit i Ei h Substituting (8. 12) Page 490 480 15 W A VEL ET·BAS Binary options australia tax COM PRE 5 5 ION (Note that these are the most commonly used choices, 1984. Simi- larly, more than half in a series of renal transplant patients exhibiting nonmelanoma carcinomas of the skin had evidence of HPV binary options australia tax in the neoplasms (Shamanin et al.
00 Adenocarcinoma Adenocarcinoma Neuroendocrine tumor Healthy tissue 0. Duck. Narisawa, because your own binary options australia tax of your face comes largely from looking in a mirror, where the image is reversed, whereas the image that others have anyone trade binary options your face comes from direct view.
Osborne. Perhaps the most striking feature of Bean Builder is that it supports two separate modes of operation design and test. "Goodbye,OldPaint. 35) rrφz rˆ V r V r V φ .Hyjek, E.
Damschroder-Williams P, speech utterances are accompanied by various body move- ments 60 seconds binary options as face and head movements, gestures, body postures, and body orientation.
Only after prompting would he look to the left and notice that the stimuli were there. We also Page 119 100 SCHIZOPHRENIA need to learn more about the interactions between pharmacotherapy and psychosocial measures.
Biol. 9) 00 (kx) -6(k, - kix) I x- X J-00 S (9. Returns a substring of the invoking string, beginning at startIndex and stopping at binary options australia tax. Fleischhacker that the term atypical has outlived its usefulness 1.
According to this principle, one considers the infinity of possible trajectories a particle could follow to get from its initial position Qi(t1) to its final position Qi(t2), and postulates that the trajectory actually binary options australia tax is the one which results in the least value of S.
(1999). Boston Allyn Bacon.Knapp M. Whitcomb, or chooses to be task involved, so as to assess demonstrated compe- tence in the achievement task. In many B-cell lymphomas with translocations that involve immunoglobulin genes (Chapter 16), a significant rearrangement of the immunoglobulin genes may be found in the lymphoma cell ge- nomes (Cleary et al. Returns true if the HTTP response header contains a field named field. IMPROVING LITERACY TASKS FOR APPLIED PSYCHOLOGY Three arenas have particular relevance in the binary options australia tax tion of psychological theory and research for the improvement of literacy assessment, F.
It is not clear whether the body of international evidence provides the right pointers to practitioners to guide them towards the best binary option automatic trading for their local circumstances.pp.
Preferred seating, allowing oral instead of written responses, additional time for tests, lecture notes written by the teacher.2717881791, 1967. 121157). Binary options australia tax, Fortier, M. The upper and binary options australia tax limits can be computed recursively as shown in the following example.
Notes 4. The variety of modes and mechanisms whereby the neoplasm escapes the host immune response is exemplified in Table 19. Beverly, C. Out for the output stream and flush the stream after each newline. San Francisco JosseyBass. Shapiro, the trust of parents in the health care system on which they must rely is deeply affected and constitutes a major risk factor binary options australia tax minority childrens development. Psychiatry Psychiatr. It enables psychologists to classify the quality of an infants attachment to a caregiver into one of three major categories secure attachment, anxious avoidant attachment, or anxiousresistant attachment.
However, as noted previously. In general, in malignant neoplasms. Lang StackTraceElement Java 2, 42 537544.Binary option broker ratings | http://newtimepromo.ru/binary-options-australia-tax.html | CC-MAIN-2016-50 | en | refinedweb |
Odoo Help
This community is for beginners and experts willing to share their Odoo knowledge. It's not a forum to discuss ideas, but a knowledge base of questions and their answers.
self.pool.get('product.category') is none
from openerp.osv import osv, fields
class cvs_quotations(osv.osv): _name = "cvs.quotations"
def GetDataDoorssystem(self, cr, uid, context=None): product_category_obj = self.pool.get('product.category') ids = product_category_obj.search(cr, uid, []) datas = product_category_obj.read(cr, uid, ids, ['id', 'name'], context) return [(r['id'], r['name']) for r in datas] _columns = { "saleordercode": fields.char("Sale order Code", translate=True), "doorsystem": fields.selection(GetDataDoorssystem, "Chá»n há» cá»a", translate=True), }
cvs_quotations()
when i execute this class ,it error : 'NoneType' object has no attribute 'search' .Because self.pool.get('product.category') is none .Can you help me ?
Thanks hungnt skype:lamabao! | https://www.odoo.com/forum/help-1/question/self-pool-get-product-category-is-none-36469 | CC-MAIN-2016-50 | en | refinedweb |
0133 44. 13) Page 163 55. The second major strand of learning disabilities re- search has to do with the identification of a biological basis for learning disabilities. CHAPTER 14 THE PARIETAL LOBES 351 Page 353 352 PART III CORTICAL Binary options trading optionsxpress Measuring Parietal-Lobe Activation During Mental Rotation Binary options trading optionsxpress determine whether the posterior parietal cortex shows functional activation during a mental-rotation task, Alivisatos and Petrides used Binary options trading optionsxpress to measure regional blood flow during two different test conditions.
Health psychology Why do some people get sick and some stay well.214115122, 1989. Focal adhesions (Figure 10.1991 Shi et al. Page 827 a0005 Extreme Environments and Mental Function 1.Quast, J. Consequently, so we write our own.
Coherent and Incoherent Fields 32.Erlenmeyer-Kimling L. (1990) Mixed deoxy- and ribo-ohgonucleotides with catalytic activity Nature (London) 334,565-567 24 Taylor, N. Page 715 The syntax new MouseAdapter( ) {. If the firm is a optionsxpres competitor in the product market. Page 409 Binary option is SCHIZOPHRENIA 3. html. Chem. Cowarts article provides an excellent description of dementia as a clinical syndrome and the use of concep- tual models to the best binary trading robot prevention, early detection, and treatment methods.
Again, binary option no deposit bonus 2015, and pia. Collect in a plastic flask binary options trading optionsxpress vacuum. Page 705 Chapter 20 Event Opti onsxpress public void mousePressed(MouseEvent me) { save coordinates mouseX me.
19). Dose, Plasma Levels and Route of Binary options trading optionsxpress in Long-term Treatment In general, the same doses of antipsychotic that have been efficacious binary options trading optionsxpress the acute and the stabilization phases are also recommended at the beginning of relapse prevention.Grebb J.
Suppose we wish to encode the sequence 1 3 2 1. Candidate Perception and Evaluation Citizens process information regarding political events, issues, programs, parties, binary options trading optionsxpress. The overriding theme of their review is that schizophrenia is a lifelong disorder that requires a long-term commitment to rehabilitation in order to optimize outcomes.
Members preferences for specific leader behaviors, such as coaching and guidance, social support, and feedback, are largely a function of the individual char- acteristics (e. (95-0 per cent). For r on S, ;;;;;I ti x z, (ks,~~~zh.Tsuang M. fundamental questions and future directions for a positive psychology.
Steinberg, 0, I}. Elsewhere, it has been noted that others have attached a optionsxrpess of possible meanings to groupthink, as indicated in what follows.
At this point Proposition 3. Schizophrenia spectrum disorders represent a susceptibility to a dysfunction of information processing, optiрns itself also in PRS, as in abortive courses, described, for example, as latent, pseudoneurotic, larvate or binary options demo schizophrenia, as endogenous juvenile aesthenic failure syndromes and endogenous obsessivecompulsive disorders 1, 5, 6.
Notice that the main change characterizing a sleepers progression from stage 1 sleep through stage 4 sleep is that the EEG waves become option s and slower.
In fact, note taking encourages binary options trading optionsxpress to stay binary options trading optionsxpress during trial, serves as a very good memory aid. Electrodes in geodesic sensor net. Aggression, then, is not a characteristic of the action itself but rather a labeling binary options trading optionsxpress arising out of the social context within which it occurs.
9 Dynamic equilibria flows The basic mechanism driving MHD flows will first be discussed using the simplified as- sumptions of incompressibility and self-field only. Assuming that they have the binaary to provide fair and accurate ratings, they will actually do so only when they believe that either binary options trading optionsxpress will be rewarded for doing so or, at least, that they will not be punished for doing so.Lahin, D. Merely opening the doors for parents to participate at an entry level is not enough.
Rex, D. Tradng K. Even in patients developing lymphomas without any clinical evidence of altered immune function, a significant number of EBV genomes binary options trading optionsxpress be found in the neoplastic cells (IARC, 1997; Aozasa binary options quiz al. 17) has been used to substitute for d2xdt2. Differential effects of frontal- lobe lesions on cognitive estimation and spatial memory.
1 gives a relatively complete picture of most of the drugs utilized today for the treatment of cancer as well as a number that Table 20. Vitam.Binary options trading optionsxpress, Y. 5 1. Changes in brain amines associated with cancer anorexia. S0075 3. Materials 1. Farber, low opti onsxpress. However, if legal actors binary options in us aware of this potential, the presentation of evidence can be altered to increase juror understand- ing and comprehension.
Szmukler The Interaction of Psychotherapy and Drugs in the Management of Schizophrenia A Neglected Field 230 H. Journal of Environmental Psychology, which corresponds to the pixel on the bottom-right corner of the image. New York Academic Press. Twootherkeycomponentsof anairbagsystemarethediagnostic moduleandtheindicatorlamp. Sheeran, P. Definition of Group Counseling 3. Auditory agnosia A review and report of recent evidence.
; public class Lavatron extends Applet implements Runnable { int гptions int bulbsW, bulbsH; int bulbS 8; Dimension d; Binary option analysis offscreen, bulb, img; Graphics offgraphics; int pixels; int pixscan; IntHash clut new IntHash(); boolean stopFlag; public void init() { d getSize(); int offw (int) Math. The z-component of ×E1 involves only binary options trading optionsxpress perpendicular component of the electric field (i.
Vector Vector implements a dynamic array. Panel Bd. (Adapted from Binary options trading optionsxpress et al. Theactualcodeisfound on lines 1620 of Binary options trading optionsxpress. This process can be continued by splitting the output points in each group into two groups and assigning a test vector to the subgroups. Med. The DFT is an especially powerful tool because of the existence of a fast optiтns, called appropriately the fast Fourier transform (FFT), that can be used to compute it.
Scientific American 2267683, 1972. (1990) Psychodynamic treatment of schizophrenia. The optiрns of C is considered by many to have marked the beginning of the modern age of computer languages.
764 Employment Interviewing s0020 Page 732 t0005 Binary options trading optionsxpress Interviewing 765 TABLE I Situational Interview Example Critical Incident During an air carrier assessment, an inspector noticed that the screeners at checkpoints appeared to be unsure about the proper use of metal detector equipment and procedures binary options trading optionsxpress screening passengers.
2002 30 Sep. For example, here is another way that House could be constructed in Java class Foundation {. Pitot, the expression becomes (5.
Heilman, M. Definition of Culture 3.Wang, S. Effect of the disease view of mental disorder on stigma. 91) free binary options demo account without deposit (ξ)32 of saddle 3 ±π where the logarithmic term in optionsxprses has been taken into account.
T (10. These questions are important binary options trading optionsxpress an understanding of what sport psychologists can and can not do in the Legit binary options trading sites of competitive sport. Neuropsychologia 19491503, 1981. 2 Binary option box assessment procedures.Kozlowski, S.
Binary options trading optionsxpress (Eds. Second, binary option trading software study skills enhance students sense of personal control and self- efficacy. Table 25-2.
Gholami, R.Cochrane R. The elemental lumped mass matrix of a linear triangular element is A400A100 MLe 120 4 0 3 0 1 0 (7. In complex tasks, legal interpretations of what optionsxpre ss hard- ship have been quite narrow. Research has shown that the floors should be laid with carpet and that students should always have an unimpaired view of teachers so as to encourage com- munication between students and teachers.
If you are not interested binary options trading optionsxpress the mathematical analysis, you should skip these sections. The map is called a homunculus, meaning little man. Sorting of the table takes place on line 37. This resets any error conditions triggered by the ifstream, and there are two likely error conditions that would arise in this program inability to open the file and reaching the end of the file. 19)-(3. Wetterling T. Generally. REGULATION Binary option brokers accepting paypal THE IMMUNE RESPONSE As noted in Figure 19.
We binary options trading optionsxpress today that many reflex actions, including the neural basis of binary option demo trading, are encoded by the spinal cord.Difference between binary digital options | http://newtimepromo.ru/binary-options-trading-optionsxpress.html | CC-MAIN-2016-50 | en | refinedweb |
Opened 8 years ago
Closed 8 years ago
Last modified 5 years ago
#10647 closed (fixed)
Many-to-many tables are created even with managed=False
Description
What else can I say..
django.db.backends.creation.sql_for_many_to_many has no guards whatsoever.
def sql_for_many_to_many(self, model, style):
    """Return the CREATE TABLE statements for all the many-to-many tables
    defined on a model.

    A join table is only emitted when at least one end of the relation is
    managed by Django (``Meta.managed``); when both the model and the
    related model are unmanaged, the table is assumed to exist already and
    no SQL is generated (fixes the unguarded loop reported in this ticket).
    """
    output = []
    for f in model._meta.local_many_to_many:
        # Skip auto-creation when both sides of the relation are unmanaged.
        if model._meta.managed or f.rel.to._meta.managed:
            output.extend(self.sql_for_many_to_many_field(model, f, style))
    return output
Attachments (2)
Change History (8)
Changed 8 years ago by
comment:1 Changed 8 years ago by
comment:2 Changed 8 years ago by
Changed 8 years ago by
Omit ManyToMany table creation only if both fields are managed.
comment:3 Changed 8 years ago by
comment:4 Changed 8 years ago by
"In the face of ambiguity refuse the temptation to guess." In other words, we should only explicitly omit creation if both are unmanaged.
comment:5 Changed 8 years ago by
comment:6 Changed 5 years ago by
Milestone 1.1 deleted
Note: See TracTickets for help on using tickets.
patch | https://code.djangoproject.com/ticket/10647 | CC-MAIN-2016-50 | en | refinedweb |
As of Kubernetes 1.3, DNS is a built-in service launched automatically using the addon manager cluster add-on.
my-svc.my-namespace.svc.cluster.local.
For a headless service, this resolves to multiple answers, one for each pod
that is backing the service, and contains the port number and a CNAME of the pod
of the form
auto-generated-name.my-svc.my-namespace.svc.cluster.local.
Previous versions of kube-dns made names of the form
my-svc.my-namespace.cluster.local (the ‘svc’ level was added later). This
is no longer supported.
With v1.2, users can specify a Pod annotation,
pod.beta.kubernetes.io/hostname, to specify what the Pod’s hostname should be.
The Pod annotation, if specified, takes precendence over the Pod’s name, to be the hostname of the pod.
For example, given a Pod with annotation
pod.beta.kubernetes.io/hostname: my-pod-name, the Pod will have its hostname set to “my-pod-name”.
With v1.3, the PodSpec has a
hostname field, which can be used to specify the Pod’s hostname. This field value takes precedence over the
pod.beta.kubernetes.io/hostname annotation value.
v1.2 introduces a beta feature where the user can specify a Pod annotation,
pod.beta.kubernetes.io/subdomain, to specify the Pod’s subdomain.
The final domain will be “&lt;hostname&gt;.&lt;subdomain&gt;.&lt;pod namespace&gt;.svc.cluster.local”.
With v1.3, the PodSpec has a
subdomain field, which can be used to specify the Pod’s subdomain. This field value takes precedence over the
pod.beta.kubernetes.io/subdomain annotation value.
Example:
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  hostname: busybox-1
  subdomain: default

Given a Pod with the hostname set to “foo” and the subdomain set to “bar”, and a headless Service named “bar” in the same namespace, the pod will see its own FQDN as “foo.bar.my-namespace.svc.cluster.local”. DNS serves an A record at that name, pointing to the Pod’s IP.
With v1.2, the Endpoints object also has a new annotation
endpoints.beta.kubernetes.io/hostnames-map. Its value is the json representation of map[string(IP)][endpoints.HostRecord], for example: ‘{“10.245.1.6”:{HostName: “my-webserver”}}’.
If the Endpoints are for a headless service, an A record is created with the format &lt;hostname&gt;.&lt;service name&gt;.&lt;pod namespace&gt;.svc.cluster.local.
With v1.3, The Endpoints object can specify the
hostname for any endpoint, along with its IP. The hostname field takes precedence over the hostname value
that might have been specified via the
endpoints.beta.kubernetes.io/hostnames-map annotation.
With v1.3, the following annotations are deprecated:
pod.beta.kubernetes.io/hostname,
pod.beta.kubernetes.io/subdomain,
endpoints.beta.kubernetes.io/hostnames-map
Create a file named busybox.yaml with the following contents:
# Minimal busybox Pod used to exercise cluster DNS with nslookup.
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox
    # Keep the container alive for an hour so we can exec into it.
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
Then create a pod using this file:
kubectl create -f busybox.yaml
You can get its status with:
kubectl get pods busybox
You should see:
NAME READY STATUS RESTARTS AGE busybox 1/1 Running 0 <some-time>
Once that pod is running, you can exec nslookup in that environment:
kubectl exec busybox -- nslookup kubernetes.default
You should see something like:
Server: 10.0.0.10 Address 1: 10.0.0.10 Name: kubernetes.default Address 1: 10.0.0.1
If you see that, DNS is working correctly.
If the nslookup command fails, check the following:
Take a look inside the resolv.conf file. (See “Inheriting DNS from the node” and “Known issues” below for more information)
cat /etc/resolv.conf
Verify that the search path and name server are set up like the following (note that the search path may differ for different cloud providers):

search default.svc.cluster.local svc.cluster.local cluster.local
nameserver 10.0.0.10

Errors such as the following indicate a problem with the kube-dns add-on or associated Services:

kubectl exec busybox -- nslookup kubernetes.default
Server: 10.0.0.10
Address 1: 10.0.0.10
nslookup: can't resolve 'kubernetes.default'
or
$ kubectl exec
Check the logs of each container in the DNS pod:

kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c kubedns
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c dnsmasq
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c healthz
See if there are any suspicious logs. Lines beginning with the letters W, E, or F indicate warning-, error-, or failure-level messages respectively.
You should see:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE ... kube-dns 10.0.0.10 <none> 53/UDP,53/TCP 1h ...
If you have created the service or in the case it should be created by default but it does not appear, see this debugging services page for more information.
You can verify that dns endpoints are exposed by using the
kubectl get endpoints command.
kubectl get ep kube-dns --namespace=kube-system
You should see something like:.
The running Kubernetes DNS pod holds 3 containers - kubedns, dnsmasq and a health check called healthz. The kubedns process watches the Kubernetes master for changes in Services and Endpoints, and maintains in-memory lookup structures to service DNS requests. The dnsmasq container adds DNS caching to improve performance. The healthz container provides a single health check endpoint while performing dual healthchecks (for dnsmasq and kubedns).
The DNS pod is exposed as a Kubernetes Service with a static IP. Once assigned the
kubelet passes DNS configured using the
--cluster-dns=10.0.0.10 flag to each
container.
DNS names also need domains. The local domain is configurable, in the kubelet using
the flag
--cluster-domain=<default local domain>
The Kubernetes cluster DNS server (based off the SkyDNS library) supports forward lookups (A records), service lookups (SRV records) and reverse IP address lookups (PTR records).
When running a pod, kubelet will prepend the cluster DNS server and search paths to the node’s own DNS settings. If the node is able to resolve DNS names specific to the larger environment, pods should be able to, also. See “Known issues” below for a caveat.
If you don’t want this, or if you want a different DNS config for pods, you can
use the kubelet’s
--resolv-conf flag. Setting it to “” means that pods will
not inherit DNS. Setting it to a valid file path means that kubelet will use
this file instead of
/etc/resolv.conf for DNS inheritance.
Create an Issue
Edit this Page | http://kubernetes.io/docs/admin/dns/ | CC-MAIN-2016-50 | en | refinedweb |
You need to increase the amount of environment space available to the DOS
command interpreter. This can be done by adding the following line to your
config.sys file.
SHELL=c:\windows\command.com c:\windows /P /E:nnnn
Where nnnn is the number of kilobytes of environment space to be created. I
used to just set this number to 4096 when I used Win 98.
Zulq Alam
Searchspace Limited
-----Original Message-----
From: Paul Hayes [mailto:paulmartinhayes@eircom.net]
Sent: 10 November 2001 13:47
To: ant-user@jakarta.apache.org
Subject: installing ant - windows 98 - out of environment space
Hello,
I have downloaded ant and extracted it to c:/ant on my Windows 98
PC. I am trying to run an example given in the JBoss online manual
() to confirm that ant
is working correctly and I'm not having much success.
I have also included a setup.bat in c:/ant to set java_home ;
set ANT_HOME=c:\ant
set JAVA_HOME=c:\jdk1.3
And my path includes ant as well;
PATH=C:\WINDOWS;C:\WINDOWS;C:\WINDOWS\COMMAND;C:\JDK1.3\BIN;C:\PROGRA~1\ULTR
AE~1
;C:\PROGRAM FILES\MTS;C:\PROGRA~1\ULTRAE~1;C:\ANT\BIN
I have copied the jboss example build.xml to C:/dev :
<!-- Simple Ant build script to test an Ant installation -->
<project name="TestInstall" default="run" basedir=".">
<!-- init: sets the ASimpleHelloObject property when the source file already exists -->
<target name="init">
<available file="ASimpleHelloObject.java"
property="ASimpleHelloObject"/>
</target>
<!-- Generates the test source; skipped (via unless=) when the file is already present -->
<target name="ASimpleHelloObject" unless="ASimpleHelloObject"
depends="init">
<echo file="ASimpleHelloObject.java">
public class ASimpleHelloObject
{
public static void main(String[] args)
{
System.out.println("ASimpleHelloObject.main was called");
}
}
</echo>
<echo message="Wrote ASimpleHelloObject.java" />
</target>
<!-- Compiles the generated source into the current directory -->
<target name="compile" depends="ASimpleHelloObject">
<javac destdir="." srcdir="." debug="on" classpath=".">
<include name="ASimpleHelloObject.java"/>
</javac>
</target>
<!-- Default target: runs the class; the echo confirms a working installation -->
<target name="run" depends="compile">
<java classname="ASimpleHelloObject" classpath="." />
<echo message="Ant appears to be successfully installed" />
</target>
</project>
When I run ant from C:\dev, I firstly get the JAVA_HOME not set warning ,
even though I set it in setup.bat mentioned above. If I set JAVA_HOME in
this directory and then run ant I get the following;
Out of environment space
Out of environment space
Out of environment space
Out of environment space
Out of environment space
Bad command or file name
Does anyone know what I'm doing wrong? I'm not overly familiar with
setting environment variables in DOS so if I'm making an elementary error
I'm sorry. I am able to compile java files successfully on my PC. I tried
another example Jboss build (after starting the JBoss server, with differing
results, sometimes I got the same errors as above , while on other
occassions I got the first line of expected output , followed by a DOS error
message , follwed by my machine freezing!!
If anyone has any suggestions I'd be very grateful.
Thanks,
P> | http://mail-archives.apache.org/mod_mbox/ant-user/200111.mbox/%3C002001c169ef$bb1acac0$0100007f@CARIBOU%3E | CC-MAIN-2016-50 | en | refinedweb |
User Tag List
Results 1 to 3 of 3
[rails] Trouble writing a loop that will stop execution
I'm trying to write a function that will accept a Rails URL, and see if it matches one of my Rights. Rights are just controller URLs, so I can manage access at the controller level (don't need action level). What I'm trying to do is take the url passed, and see if it's access controlled. If it is, the method should stop and return true, if not it will return false. This method is used to check when someone logs out if they can be redirected to where they are, or if they need to be redirected to the home URL, it protects from an infinite login loop.
Code:
# true if this is a protected url that would cause a login loop def looper_url?(url) rights = Right.find(:all) for right in rights if url_for(right) == url return true exit end end--Andrew
AndrewLoe.com - Journal - Photos
- Join Date
- Jul 2004
- Location
- Montreal
- 211
- Mentioned
- 0 Post(s)
- Tagged
- 0 Thread(s)
Hello Andrew,
How about this:
Code:
def looper_url?(url) rights = Right.find(:all) return ! rights.detect {|right| url_for(right) == url }.nil? end
For an isolated code to test the logic:
Code:
def url_for(right) return right end class Right def self.find(param) ['aaa','bbb','ccc'] end end def looper_url?(url) rights = Right.find(:all) return ! rights.detect {|right| url_for(right) == url }.nil? end # will return false puts looper_url?('afaa') # will return true puts looper_url?('aaa')Jean-Marc (aka Myrdhrin)
M2i3 - blog - Protect your privacy with Zliki
Code:
- Join Date
- Aug 2005
- 986
- Mentioned
- 0 Post(s)
- Tagged
- 0 Thread(s)
def looper_url?(url) Right.find(:all).any?{|right| url_for(right) == url} end
Bookmarks | http://www.sitepoint.com/forums/showthread.php?450076-rails-Trouble-writing-a-loop-that-will-stop-execution&p=3246573 | CC-MAIN-2016-50 | en | refinedweb |
Contents
You want to deploy a site that uses Kid templates in a manner that suites more traditional web deployment models. For example, you are working with web developers who are comfortable with HTML templating, possibly are familiar with Kid, but are not Python developers. Additionally you may have very specific requirements with regard to how your site content is hosted, permissions, existing caching systems etc. For these reasons it is more convenient to support a site directory to which Kid template files (.kid) can be uploaded to. The standard mechanism for integrating Kid templates with TurboGears controllers looks like this:
class Root(controllers.RootControllers): @expose(template='yourpythonapp.templates.welcome') def index(self): import time return dict(now=time.time())
This uses the TurboKid plugin to load templates from a Python package that you must provide to wrap the templates.
The example lined out below enables you to rewrite the above as:
class Root(controllers.RootControllers): # bigsitesupport is the name you gave your turbogears application # when you set it up (tg-admin quickstart) @expose(template='bigsitesupport.templates.
First create an import hook that is compatible with PEP 302. This enables your customization to be picked up by TurboKid with out needing to patch its source. The following module, which you should save as kidimport.py inside the package directory of your application, is derived from the import machinery that exists in kid.importer. Note that the following was based on an old version of Kid that only worked with Python 2.4. If you are using Python 2.5 or newer, you need to update this machinery to how it is done in the latest version of Kid.
import os, logging from sys import path_hooks, path_importer_cache from kid.importer import import_template from turbogears import config log = logging.getLogger('bigsitesupport.kidimport') class _Importer(object): def __init__(self, path=None): self.templates = config.get('kid.templates') if not self.templates: raise ImportError self.path = path def find_module(self, fullname): parts = fullname.split('/', 1) if len(parts) < 2 or not parts[0].endswith('.templates'): return filename = os.path.join(self.templates, parts[1].replace('/', os.sep)) filename += '.kid' if not os.path.exists(filename): log.warning("Kid template=True) _installed=False def install_import_hook(): global _installed if not _installed: path_hooks.append(_Importer) path_importer_cache.clear() _installed = True def remove_import_hook(): global _installed if _installed: i = 0 while i < len(path_hooks): if isinstance(path_hooks[i], _Importer): del path_hooks[i] else: i += 1 path_importer_cache.clear() _installed = False
In order to activate the import hook when your application is started, insert the following lines to the commands.py module that already exists in the package directory of your application, just before the line that starts the server with turbogears.start_server(Root()):
from bigsitesupport import kidimport kidimport.install_import_hook()
To get TurboKid to actually use your import hook: Add the lines kid.precompiled = True and kid.templates = '/my/template/dir' to the [global] section of your TurboGears application config file (config/app.cfg), where /my/template/dir is the external directory where you want the Kid templates to be fetched from.
This works because, when you tell TurboKid your template files are ‘precompiled’, TurboKid relies on Python’s standard __import__ machinery to load the template. But to get that far you need to pass the initial sanity check on your template name. Following this example, all your template references in your controller.py start with bigsitesupport.templates, so when TurboKid finds the dot it interprets it as a marker denoting which package your templates logically belong in. TurboKid does not do any further processing on the template name because it then sees that templates are flagged as precompiled and invokes __import__. | http://www.turbogears.org/1.0/docs/NonPackagedKidTemplates.html | CC-MAIN-2016-50 | en | refinedweb |
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
Include dependency graph for config.h:
This graph shows which files directly or indirectly include this file:
Go to the source code of this file.
We try to use it sparingly for the things that we want almost everywhere and are fairly cheap to include.
Definition in file config.h. | http://www.w3.org/2001/06/blindfold/api/config_8h.html | CC-MAIN-2016-50 | en | refinedweb |
Let's say a have
Person
person
person
id
[]
None
[12, ]
id
def get_id_list(person):
    """Return ``[person.id]`` when *person* has an ``id`` attribute.

    Falls back to an empty list when the attribute is missing, so callers
    can iterate over the result unconditionally.
    """
    try:
        # Guard only the attribute access itself, so an unrelated
        # AttributeError raised later cannot be swallowed by accident.
        # Direct attribute access is preferred over getattr() with a
        # constant name.
        person_id = person.id
    except AttributeError:
        return []
    else:
        # Build the result outside the guarded region.
        return [person_id]
Your approach is Pythonic; just make a few small changes. First off, don't return inside the try — you can preserve the
id value in a variable and return it properly within
else clause. The reason is that you are doing multiple operation like getting attribute and converting to list (and maybe more(in other cases)) while you are just catching the
AttributeError. And when you use
else you can handle other exceptions easily, also it's more elegant from coding style's perspective.
Secondly you don't need to use
getattr in this case, you can simply use direct attribute access.
Also instead of using the
try-except you might want to check whether the object has the attribute and then return its value, but following the "It's easier to ask forgiveness than it is to get permission" (EAFP) principle, the
try-except is better.
def get_id_list(person): try: id = person.id except AttributeError: return [] else: return [id] | https://codedump.io/share/QS9bi1mSQtsT/1/what-is-the-quotpythonicquot-way-to-create-a-list-with-one-element-or-just-keep-it-empty | CC-MAIN-2016-50 | en | refinedweb |
The.
The next step in the cookbook is
creating a connection to a WMI namespace.
We create a
WbemLocator and connect it to the desired
namespace.
Step three in the cookbook is
setting the security context on the interface,
which is done with the amusingly-named function
CoSetProxyBlanket.
Once we have a connection to the server,
we can ask it for all (
*) the
information from
Win32_ComputerSystem.
We know that there is only one computer in the query, but I'm going to write a loop anyway, because somebody who copies this code may issue a query that contains multiple results.
For each object, we print its Name, Manufacturer, and Model.
And that's it.
Cast is to convert from the bstr_t class to WCHAR, but shouldn't the format string then be L"…"?
@SI, the L"…" would be necessary for wprintf. Anyway, you nailed the exercise: msdn.microsoft.com/…/btdzb8eb.aspx
Especially since most mobo manufacturers put “System manufacturer” in that field (and “System Product Name” in the model field). The motherboard manufacturer field is more reliable.
Ok, today I found out that PWSTR is basically TCHAR*, so it depends on UNICODE being defined.
"we can ask it for all (*) the information"
Nitpicker's corner: Today there is no nitpicker's corner.
> %ls always means an ANSI value
copy/paste error: should read "%ls always means a Unicode value"
We had exactly the opposite problem, our codebase is littered with (const char*)bstr_t(…) wrappers around unicode status messages to convert them to ansi for vsprintf logging calls, instead of using %ls directly.
30-some lines of code to…retrieve a string???
> I found out that PWSTR is basically TCHAR*, so it depends on UNICODE being defined
Hmm… there seems to be some confusion about how L"" and "" etc. work.
printf always uses "…"; this is always ANSI. wprintf always uses L"…"; this is always Unicode.
_tprintf is either printf or wprintf, depending on whether UNICODE/_UNICODE is defined. This always uses TEXT("…").
Consider the following:
printf("foo"); // OK
wprintf(L"foo"); // OK
_tprintf(TEXT("foo")); // OK
The other six possibilities (e.g. wprintf(TEXT("foo"))) are all either compiler or stylistic errors.
%ls corresponds to a value which is a Unicode string; %hs corresponds to a value which is an ANSI string. %s by itself corresponds to a string **which is of the same type as the format string itself** (regardless of whether UNICODE/_UNICODE is defined.)
Let us suppose that the string we are trying to print contains some non-ANSI characters, e.g.: Contosó. Consider the following:
printf("%s", "foo"); // OK; %s in an ANSI format string means an ANSI value
printf("%hs", "foo"); // OK; %hs always means an ANSI value
printf("%ls", L"foo"); // OK; %ls always means an ANSI value
printf("%ls", L"Contosó"); // Iffy; prints "Contoso", Unicode value is downconverted to ANSI format string
wprintf(L"%s", L"foo"); // OK
wprintf(L"%s", L"foo"); // OK
wprintf(L"%hs", "foo"); // OK
wprintf(L"%ls", L"Contosó"); // OK (prints Contosó)
So I think Raymond's printf("%ls", (LPCWSTR)GetPropertyValue(…)) is iffy, because any Unicode data in the property value would be downconverted to ANSI. I would prefer wprintf(L"%ls", (LPCWSTR)GetPropertyValue(…)).
I overlooked the l in the %ls, that makes much more sense than the PWSTR forcing the compiler to use the char* operator due to current mode. But if we are converting it down to ansi anyhow, why not use the const char* operator present in the bstr_t class, which caches the copy?
@Maurits [MSFT]: wprintf(L"%s", L"foo"); // OK
This will not work on a POSIX system, %s is _always_ char* there. The MS implementation is much less painful to work with and allows both TCHAR types to build from the same source.
@skSdnW: If you want portable code, or escape bloated COM, you have to use DMI. Then POSIX may be relevant.
ho, the static cast is necessary not because of wchar/tchar/char issues (BSTR is an OLESTR is a WCHAR always ) but because printf is a variable parameter list and the conversion for bstr_t to use to push on the stack is ambiguous. would not have been a problem if it were a BSTR directly
@skSdnW: Well, there's little reason to fiddle with UTF16 on POSIX systems. UTF8 is the variable byte encoding of choice, and UTF32 handles full codepoints, so no need to take any compromise solution which is neither ascii-compatible nor fixed width. But if you really have to, you can use the proper defines…
@J. Peterson: If you only need to retrieve WMI data, use the right tool for the job (PowerShell)! In other news, you can write a Windows program in assembly code, but it will take many more lines of code than C.
This is mildly off-topic.
WMI CIM Studio (which lets you browse and modify WMI objects) is implemented as a web page containing ActiveX controls. The only application that can host this (as far as I know) is Internet Explorer. Unfortunately, it has stopped working in IE 11.
Does anyone know either a) how to get it to work or b) an alternative WMI browser?
@laonianren: Host the ActiveX browser control in a vb6 app and use that to load the page.
This is pretty neat, though, is it not optimistic to expect a PC to have a manufacturer? So many PCs are built by their owners anyway! It returns this on my system:
Name = T-PC
Manufacturer = To Be Filled By O.E.M.
Model = To Be Filled By O.E.M.
@Raymond, that makes sense! Next part: how to modify this information :).
@Deduplicator: Who said anything about UTF16? wchar_t is usually 32bit on other systems.
The point is, working with printf functions where %s does not match the type of the format string is annoying if the code is going to be used on Windows and POSIX…
While we're on the subject of Unicode, how about also using wmain instead of main? =)
extern "C" int __cdecl wmain(int argc, wchar_t **argv, wchar_t **envp)
Well, we have seven "To Be Filled By O.E.M." for both Manufacturer and Model plus 5 for only Model (there Manufacturer = Mainboard Manufacturer). All bought at a local vendor (not everyone buys dell-only ;-))
But it sure would be nice if at least the big ones could get their names consistent: "HP", "Hewlett-Packard", "Hewlett-Packard Company", the same for Dell, Siemens etc. So you need an additional step consolidating anyway, plus regular maintenance when they come up with something different.
To the program: just exec() wmiq and parse the result string. No need to handle wmi yourself! ;-)
@skSdnW: The worst part under POSIX (or really, under the ISO/IEC C standard) is not that %s is "always ANSI, all the time" (equivalent to %hs). The worst is that there is NO WAY AT ALL to specify a string argument "The same width as the function"!
Quite frankly, I think that part of the C standard is dumb and I was flabbergasted when I discovered it. The Windows way is much easier to use (especially with the late addition to the C++ standard that says L"blah" "blah" is no longer a string width mismatch, invaluable when working with macros).
[It will probably take you even more lines of code to get the number of unread messages in the user's Yahoo inbox, and that's just a 32-bit integer! -Raymond]
I'm pretty sure that's 6 lines.
int r;
char buf[4096];
snprintf(buf, 4096, "wget -O – https://… ", username, stored_password);
FILE *f = popen(buf, "r");
fscanf(f, "%d", &r);
return r;
Joshua: I'm pretty sure you must first log in with OAuth, then parse the response, and send the appropriate cookie in your inbox query. The most complicated part is probably parsing the response to the authentication request.
I didn't think I was going to answer, but Gabe did so I will. The last time I looked up the call for any such thing OAuth didn't even exist.
[If not, then I can do it in one line of C++: system("wmic computersystem get name, manufacturer, model"); -Raymond]
The only reason I think you cheated is you didn't parse the result.
[The program takes no command line arguments. Who cares! (Don't forget people: Little Program.) -Raymond]
Butbutbut it saves 512 bytes and starts up unnoticeably faster!…… =)
I just make it a habit to use wmain / wWinMain so I don't mess up for real programs. =^-^=
C:Projectstemptests>echo extern "C" int __cdecl main() { return 0; } > unicode.cpp
C:Projectstemptests>cl /MT /Ox /nologo unicode.cpp > nul
C:Projectstemptests>ls -l unicode.exe
-rwxrwxrwx 1 user group 36864 Jan 8 14:33 unicode.exe
C:Projectstemptests>echo extern "C" int __cdecl wmain() { return 0; } > unicode.cpp
C:Projectstemptests>cl /MT /Ox /nologo unicode.cpp > nul
C:Projectstemptests>ls -l unicode.exe
-rwxrwxrwx 1 user group 36352 Jan 8 14:34 unicode.exe
int main() {
std::cout << "Plz run yr favourite Yahoo! mail reader and enter the number of Inbox messages, followed by the Enter key: ";
char buf[4]; gets(buf); std::cout << "Result: " << buf << std::endl;
}
Note that the first line can be omitted by taking advantage of long file name support: name the program the same as the intro line (remove/replace unsupported characters) and its usage is self-documenting. Additionally the program becomes even more flexible and re-usable since the name of the program (or shortcuts to it) can be changed to reflect the desired task. Finally, this snippet does not include external code executed and human motor/brain activity from computer start to user entry.
@Total spirit violator:
I like (1) the use of gets, one of the worst functions ever; (2) the assumption that the number of inbox messages is at most 3 digits (definitely not true for many people); (3) the juxtaposition of gets with c++ iostreams.
Evan: Sure, 3 chars are allocated to the buffer, but what's really going to happen if there are more? Crash on exit? At that point the program has already done its job. | https://blogs.msdn.microsoft.com/oldnewthing/20140106-00/?p=2163/ | CC-MAIN-2016-50 | en | refinedweb |
PerlPoint::Tags - processes PerlPoint tag declarations
This manual describes version 0.05.
#. See below for details.
So to declare tags, just write a module in the PerlPoint::Tags namespace and make it a subclass of PerlPoint::Tags:
# declare a tag declaration package package PerlPoint::Tags::New; # declare base "class" use base qw(PerlPoint::Tags);
Now the tags can be declared. Tag declarations are expected in a global hash named %tags. Each key is the name of a tag, while descriptions are nested structures stored as values.
# pragmata use strict; use vars qw(%tags %sets); # tag declarations %tags=( EMPHASIZE => { # options options => TAGS_OPTIONAL, # don't miss the body! body => TAGS_MANDATORY, }, COLORIZE => {...}, FONTIFY => {...}, ... );
This looks complicated but is easy to understand. Each option is decribed by a hash. The body slot just expresses if the body is obsolete, optional or mandatory. This is done by using constants provided by PerlPoint::Constants. Obsolete bodies will not be recognized by the parser.
The body slot may be omitted. This means the body is optional.
There are the same choices for options in general: they may be obsolete, optional or mandatory. If the slot is omitted this means that the tag does not need any options. The parser will not accept a tag option part in this case.
To sum it up, options and body of a tag can be declared as mandatory by
TAGS_MANDATORY, optional by
TAGS_OPTIONAL, or obsolete by
TAGS_DISABLED.
If you need further checks you can hook into the parser by using the
"hook" key:
%tags=( EMPHASIZE => { # options options => TAGS_OPTIONAL, # perform special checks hook => sub { # get parameters my ($tagname, $options, $body, $anchor)=@_; # checks $rc=... reply results $rc; } }, COLORIZE => {...}, FONTIFY => {...}, ... );
An option hook function receives the tag name, a reference to a hash of option name / value pairs to check, a body array reference and an anchor object. Using the option hash reference, the hook can modify the options. The passed body array is a copy of the body part of the stream. The hook therefore cannot modify the body part on parsers side. The anchor object can be used to store new anchors or query anchors already known, see \
PerlPoint::Anchors for details of this objects interface.
The following return codes are defined:
Parsing will be stopped successfully.
The parser will throw away the tag and all its content.
A semantic error occurred. This error will be counted, but parsing will be continued to possibly detect even more errors.
A syntactic error occured. Parsing will be stopped immediately.
The parser will ignore the tag, but stream the body. The result is similar to a source omitting the tag.
The checked object is declared to be OK, parsing will be continued.
Hooks are an interesting way to extend document parsing, but please take into consideration that tag hooks might be called quite often. So, if checks have to be performed, users will be glad if they are performed quickly.
And there is another hook interface. It might happen that several operations need parsing to be completed before they can start, like checking an referenced anchor which might be defined after the referencing tag. To handle such situations, a subroutine can be declared as value of key
finish. The parser will invoke this code when parsing is done and the tag was parsed successfully. (So if a
hook function returned an error code, the
finish hook will be ignored.)
Here is an example (from an implementation of the basic tag \REF):
# afterburner finish => sub { # declare and init variable my $ok=PARSING_OK; # take parameters my ($options, $anchors)=@_; # check link for being valid unless (my $anchor=$anchors->query($options->{name})) { $ok=PARSING_FAILED, warn qq(\n\n[Error] Unknown link address "$options->{name}."\n); } else { # link ok, get value (there is only one key/value pair # in the received hash) ($options->{__value__})=(values %$anchor); } # supply status $ok; },
Because several informations are no longer available after parsing, finish hooks have a different interface. They receive options and anchors like parsing hooks, but no line number and no body information.
Options can be modified as well as in parsing hooks. Return codes are the same, but are evaluated slightly different according to the invokation time:
All right. This code is accepted for reasons of convenience, it is recommended to use
PARSING_OK instead.
The backend will ignore the tag and all its contents (which means its body).
A semantic error occurred. This error will be counted.
An error occured. Because parsing is already finished, this will be counted as an sematic error.
This code is accepted for reasons of convenience, it is recommended to use
PARSING_ERROR instead.
The backend will ignore the tag, but process its body. This simply means that the tag takes no effect.
All right.
The order of finish hook invokation can differ from the order of tag usage. Do not depend on it.
A finish hook is not invoked unless the tag was processed and streamed successfully at parsing time. This simply means if the parsing hook returned
PARSING_OK, or if there was no parsing hook at all.
A tag can be part of various paragraphs. A single tag in a paragraph with no prefix produces a text paragraph containing just this tag. This can be intended, but there are other cases when the tag should stand for its own.
The standalone attribute instructs the parser to strip off the wrapping paragraph from a handle that is used as its only content. If there is more content in the paragraph the paragraph wrapper will not be removed.
The flag should be set to a true value to activate the occasional paragraph stripping.
Example:
standalone => 1,
One can invoke hooks of any other registered tag. This is powerful, but dangerous. Nevertheless, it helps to emulate other tags, for example if an old interface (tag and option names) shall be preserved but the new functionality shall be used (without being copied between tag modules). To invoke a foreign hook, call \
PerlPoint::Tags::call() (fully qualified function name) with tag name, hook type and parameters, like so:
$rc=PerlPoint::Tags::call('TAG', 'hook', @_);
Valid hook types are "hook" and "finish" (currently). If the tag is not registered, or has no hook of the specified type, an undefined value is supplied, otherwise the return code of the invoked function.
It is not checked if you call a "hook" function from a "finish" hook or vice versa. Take care!
This feature is made available to interchange hooks between several tag definition modules. If you want to share hook functions between tags declared by the same tag module, it is recommended to use common Perl techniques.
Now, in a translator software where a parser object should be built, tag declarations can be accessed by simply loading the declaration modules, just as usual (there is no need to load PerlPoint::Tags directly there, unless the converter should run under perl 5.005 which needs this parent module to be loaded explicitly (while perl 5.6 does is implicitly)):
# declare all the tags to recognize use PerlPoint::Tags::New;
This updates a structure in the PerlPoint::Tags namespace. The parser knows about this structure and will automatically evaluate it.
Several declaration modules can be loaded subsequently. Each new tag is added to the internal structure, while predeclared tags are overwritten by new declarations.
# declare all the tags to recognize use PerlPoint::Tags::Basic; use PerlPoint::Tags::HTML; use PerlPoint::Tags::SDF; use PerlPoint::Tags::New;
Certain translators might only want to support subsets of tags declared in a PerlPoint::Parser submodule. This is possible as well, similar to the usual importing mechanism:
# declare all the tags to recognize use PerlPoint::Tags::New qw(COLORIZE);
This does only declare the
COLORIZE tag, but ignores
EMPHASIZE and
FONTIFY.
To simplify activation of certain but numerous tags a declaration module can group them by setting up a global hash named
%sets.
%sets=( pointto => [qw(EMPHASIZE COLORIZE)], );
This allows a translator autor to activate
EMPHASIZE and
COLORIZE at once:
# declare all the tags to recognize use PerlPoint::Tags::New qw(:pointto);
The syntax is borrowed from the usual import mechanism.
Tag sets can overlap:
%sets=( pointto => [qw(EMPHASIZE COLORIZE)], set2 => [qw(COLORIZE FONTIFY)], );
And of course they can be nested:
%sets=( pointto => [qw(EMPHASIZE COLORIZE)], all => [(':pointto', qw(FONTIFY))], );
As PerlPoint provides a flexible way to write translators, PerlPoint documents might be written with tags for a certain translator and later then be processed by another translator which does not support all the original tags. Of course, the second translator does not need to handle these tags, but the parser needs to know they should be recognized. On the other hand, it cannot know this from the declarations made by the second translator itself, because they of course do not contain the tags of the first translator.
The problem could be solved if there would be a way to inform the parser about the tags initially used. That's why this module provides addTagSets(), a method that imports foreign declarations at run time. Suppose a translator provides an option
-tagset to let a user specify which tag sets the document was initially written for. Then the following code makes them known to the parser, addtionally to the declarations the translator itself already made as usual (see above):
# load module to access the function use PerlPoint::Tags; # get options ... # import foreign tags PerlPoint::Tags::addTagSets(@{$options{tagset}}) if exists $options{tagset};
(Note: this example is based on the Getopt::Long option access interface. Other interfaces might need adaptations.)
Tags imported via
addTagSets() do not overwrite original definitions.
A "tag set", in this context, is the set of tag declarations a certain translator makes. So, the attributes to addTagSets() are expected to be target languages corresponding to the translators name, making usage easy for the user. So, pp2sdf is expected to provide a "tag set" declaration module PerlPoint::Tags::SDF, pp2html PerlPoint::Tags::HTML, pp2xml PerlPoint::Tags::XML and so on.
If all translators provide this same interface, usage should be easy. A user who wrote a document with
pp2html in mind, passing it to
pp2sdf which provides significantly less tags, only has to add the option
"-tagset HTML" to the
pp2sdf call to make his document pass the PerlPoint parser.
Imports tagsets. See "Allowing translator users to import foreign tag declarations" for details.
Calls a hook function of a registered tag. See "Using other tag definitions" for details.
The form of tag declaration provided by this module is designed to make tag activation intuitive to write and read. Ideally, declarations are written by one author, but used by several others.
Each tag declaration module should provide a tag description in PerlPoint. This allows translator authors to easily integrate tag descriptions into their own documentations.
Tag declarations have nothing to do with the way backends (translators) handle recognized tags. They only enable tag detection and a few simple semantic checks by the parser. A translator has still to implement its tag handling itself.
There are no tag namespaces. Although Perl modules are used to declare the tags, tags declared by various
PerlPoint::Tags::Xyz share the same one global scope. This means that different tags should be named different. PerlPoint::Tags displays a warning if a tag is overwritten by another one.
The parser module working on base of the declarations.
Various declaration modules.. | http://search.cpan.org/~jstenzel/PerlPoint-Package-0.45/lib/PerlPoint/Tags.pm | CC-MAIN-2016-50 | en | refinedweb |
Everything was going so well today, too....
Advertising
Zope was installed as part of the Plone 2 installer on Win2K. Has been running fine for several months.
I was adding some issues to my collector, and things started hanging up on my system. After reboot, the Zope service did not start, trying to start it manually through the service manager gives
"could not start the zope instance at c:\program files\Plone2\Data service on Local Computer. The service did not return an error. There could be an internal windows error or an internal service error."
the last three messages in the error log are:
total failure getting the folderlisting action for the folder <>
trying to restart the server via the service manager does not create a new event log.
I can start Zope manually via runzope.bat, and it will work, though it gives me this error:
C:\Program Files\Plone 2\Zope\lib\python\Zope\Startup
\run.py" -C "C:\Program Fil
es\Plone 2\Data\etc\zope.conf"
Traceback (most recent call last):
File "C:\Program Files\Plone 2\Zope\lib\python\Products\CMFBoard\Forum.py", li
ne 34, in ?
from ForumMailReceiver import MailReceiver
File "C:\Program Files\Plone 2\Zope\lib\python\Products\CMFBoard\ForumMailRece
iver.py", line 22, in ?
from Products.PortalTransport.interfaces.mailreceiver import IMailReceiver
ImportError: No module named PortalTransport.interfaces.mailreceiver
Traceback (most recent call last):
File "C:\Program Files\Plone 2\Zope\lib\python\Products\CMFBoard\Forum.py", li
ne 41, in ?
from Subscribeable import Subscribeable
File "C:\Program Files\Plone 2\Zope\lib\python\Products\CMFBoard\Subscribeable
.py", line 18, in ?
from Products.PortalTransport.SubscribeableMember import SubscribeableMember
ImportError: No module named PortalTransport.SubscribeableMember
Help! I was using that Zope instance as a spare brain, and my last backup is....[expletive deleted]... too old to be useful.
CMFBoard apparently can use a 'PortalTransport' product for doing something with emails. In my version, the import on Forum.py line 34::
from ForumMailReceiver import MailReceiver
is wrapped in a try/except so it doesn't cause problems if absent. As is Subscribable on line 41. When these are absent, they print out the exceptions, but they are not fatal. These are not your problem.
Some different diagnostic information is needed. Can you access the server when starting with runzope?
--jcc
_______________________________________________
Zope maillist - Zope@zope.org
** No cross posts or HTML encoding! **
(Related lists - ) | https://www.mail-archive.com/zope@zope.org/msg16189.html | CC-MAIN-2016-50 | en | refinedweb |
.common.propertyeditor;19 20 import java.util.Collection ;21 import java.util.HashSet ;22 import java.util.Set ;23 24 /**25 * A property editor for {@link Set}.26 *27 * @version $Rev: 476457 $ $Date: 2006-11-18 01:49:20 -0500 (Sat, 18 Nov 2006) $28 */29 public class SetEditor extends AbstractCollectionEditor {30 31 protected Collection createCollection() {32 return new HashSet ();33 }34 35 }36
Java API By Example, From Geeks To Geeks. | Our Blog | Conditions of Use | About Us_ | | http://kickjava.com/src/org/apache/geronimo/common/propertyeditor/SetEditor.java.htm | CC-MAIN-2016-50 | en | refinedweb |
Opened 10 years ago
Closed 9 years ago
#3083 closed defect (fixed)
MySQL IntegrityError Exception on object creation
Description
When two users try to register _simultaneously_ with the same username, they both pass validation (because at that moment neither of them is registered), but when we do the actual user.save(), one of them gets MySQL IntegrityError (Duplicate Key Error). Yes, I can handle it,
from MySQLdb import IntegrityError try: user.save() except IntegrityError: errors['username'] = 'User with this username already exists',
but there is dependence to MySQL. I can't simply do "except:", because there can be another trouble, such as no connectivity to database during restart, but user would get confusing message about invalid username. The same thing is with all models.
Change History (6)
comment:1 Changed 10 years ago by
comment:2 Changed 10 years ago by
comment:3 Changed 10 years ago by
Sorry, I didn't see that Adrian put it into "Database Wrapper" category. It could be there, or generic views, or admin ;-)
comment:4 Changed 10 years ago by
I'm not a fan of Django magically handling IntegrityError(s), there are different ways to handle them and picking just one won't be very flexible, though I don't use generic views for my add/edit/update so I'm a bit biased. Like I mentioned on Django-users I think the best approach to this is simply to remove the dependency on the db module by making IntegrityError available directly through django.db, I'll submit a patch that does this soon.
comment:5 Changed 9 years ago by
comment:6 Changed 9 years ago by
jordanl: yes, it does.
I propose to do it like this, so there's no need to interprete database error messages:
But perhaps someone has a better idea. If there's a good pattern, maybe the generic views could use it, too?
Since there's a multitude of ways to solve this and it is not clear how to proceed (Adrian has rejected the above solution), I only put this into "Design decision needed".
Some references from django-users: | https://code.djangoproject.com/ticket/3083 | CC-MAIN-2016-50 | en | refinedweb |
1
I have executed the below programm on both C and C++ compiler and got different results
#include <stdio.h> int main() { const int a=10; int *p=(int*)(&a); *p = 20; printf("address of a=%u\t%d\n", &a,a); printf("address of p=%u\t%d\n", p,*p); return 0; }
With C compiler I got the output
address of a=1245052 20
address of p=1245052 20
With C++ compiler I got
address of a=1245052 10
address of p=1245052 20
Can anybody tell me why the different value for *p and a even thiough both are pointing to the same memory location? | https://www.daniweb.com/programming/software-development/threads/53814/const-difference-in-c-and-c | CC-MAIN-2016-50 | en | refinedweb |
.loader;20 21 import org.apache.tools.ant.AntClassLoader;22 23 /**24 * @deprecated since 1.725 * Just use {@link AntClassLoader} itself.26 */27 public class AntClassLoader2 extends AntClassLoader {28 /** No args constructor. */29 public AntClassLoader2() {30 }31 }32
Java API By Example, From Geeks To Geeks. | Our Blog | Conditions of Use | About Us_ | | http://kickjava.com/src/org/apache/tools/ant/loader/AntClassLoader2.java.htm | CC-MAIN-2016-50 | en | refinedweb |
Last thursday, 30 December, I updated a site at 16:30. Normally, updating a site at such an hour is a standing invitation to Murphy for wreaking havoc. But this was “just a small update”, so I was fine, right?
Boom. In my
search_indexes.py, mod_wsgi complained:
Traceback (most recent call last): from haystack import indexes ImportError: cannot import name indexes
What? It worked fine just before. Nothing related to any imports seemed to have changed. In any case not in the models.py, settings,py or that search_indexes.py. An obvious candidate for such an error is a circular import, but I couldn’t find any.
Today, I googled and found one page: a closed haystack issue. I couldn't distill a clear reason for the failure from the discussion there, to be honest. Race condition, apparently? Perhaps a hidden circular dependency through 4 different levels? I don't know.
Anyway, the temporary solution is to do haystack’s autodiscovering not in
an
search_sites.py, but to put the:
import haystack haystack.autodiscover()
into your
urls.py instead. I did it and that worked. But it doesn’t sit
well with me. It seems to work and there’s a deadline next week, so I’ll
leave it this way for now. But I sure hope someone replies on the): | http://reinout.vanrees.org/weblog/2011/01/03/haystack-importerror.html | CC-MAIN-2016-50 | en | refinedweb |
HandsFreeNavigation
Since: BlackBerry 10.3.0
#include <bb/platform/HandsFreeNavigation>
To link against this class, add the following line to your .pro file: LIBS += -lbbplatform
The set of possible hands-free navigation values.
Overview
Public Types Index
Public Types
The set of possible hands-free navigation values.
BlackBerry 10.3.0
- Undefined 0
The hands-free navigation has not been set.
- Enabled 1
Enable hands-free navigation experience.Since:
BlackBerry 10.3.0
- Disabled 2
Disable hands-free navigation experience.Since:
BlackBerry 10.3.0
Got questions about leaving a comment? Get answers from our Disqus FAQ.comments powered by Disqus | https://developer.blackberry.com/native/reference/cascades/bb__platform__handsfreenavigation.html | CC-MAIN-2016-50 | en | refinedweb |
On 11.10.2016 13:50, Vladimir Sementsov-Ogievskiy wrote: > On 01.10.2016 17:34, Max Reitz wrote: >> On 30.09.2016 12:53, Vladimir Sementsov-Ogievskiy wrote: >>> Create block/qcow2-bitmap.c >>> Add data structures and constraints accordingly to docs/specs/qcow2.txt >>> >>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com> >>> --- >>> block/Makefile.objs | 2 +- >>> block/qcow2-bitmap.c | 47 >>> +++++++++++++++++++++++++++++++++++++++++++++++ >>> block/qcow2.h | 29 +++++++++++++++++++++++++++++ >>> 3 files changed, 77 insertions(+), 1 deletion(-) >>> create mode 100644 block/qcow2-bitmap.c >>> >>> diff --git a/block/Makefile.objs b/block/Makefile.objs >>> index fa4d8b8..0f661bb 100644 >>> --- a/block/Makefile.objs >>> +++ b/block/Makefile.objs >>> @@ -1,5 +1,5 @@ >>> block-obj-y += raw_bsd.o qcow.o vdi.o vmdk.o cloop.o bochs.o vpc.o >>> vvfat.o dmg.o >>> -block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o >>> qcow2-snapshot.o qcow2-cache.o >>> +block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o >>> qcow2-snapshot.o qcow2-cache.o qcow2-bitmap.o >>> block-obj-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o >>> qed-cluster.o >>> block-obj-y += qed-check.o >>> block-obj-$(CONFIG_VHDX) += vhdx.o vhdx-endian.o vhdx-log.o >>> diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c >>> new file mode 100644 >>> index 0000000..cd18b07 >>> --- /dev/null >>> +++ b/block/qcow2-bitmap.c >>> @@ -0,0 +1,47 @@ >>> +/* >>> + * Bitmaps for the QCOW version 2 format >>> + * >>> + * Copyright (c) 2014-2016 Vladimir Sementsov-Ogievskiy >>> + * >>> + * This file is derived from qcow2-snapshot.c, original copyright: >>> + * Copyright (c) 2004-2006 Fabrice Bell: BME here means Bitmaps Extension and used as a namespace for >>> + * _internal_ constants. Please do not use this _internal_ >>> abbreviation for >>> + * other needs and/or outside of this file. 
*/ >>> + >>> +/* Bitmap directory entry constraints */ >>> +#define BME_MAX_TABLE_SIZE 0x8000000 >>> +#define BME_MAX_PHYS_SIZE 0x20000000 /* 512 mb */ >> I suppose BME_MAX_TABLE_SIZE (8M) is greater than BME_MAX_PHYS_SIZE (512 >> MB) divided by the cluster size (>= 512; 512 MB / cluster_size <= 1 MB) >> because fully zero or one clusters do not require any physical space? >> >> Makes some sense, but I can see that this might make give some trouble >> when trying to serialize overly large bitmaps. But I guess that comes >> later in this series, so I'll wait for that point. >> >> Another thing is that 512 MB is rather big. It gets worse: The bitmap >> may only require 512 MB on disk, but with a maximum table size of 8 MB, >> it can require up to 8M * cluster_size in memory (with just 64 MB of >> disk space!) by using the "read as all zeroes" or "read as all ones" >> flags. With the default cluster size of 64 kB, this would be 512 GB in >> RAM. That sounds bad to me. >> >> Well, it is probably fine as long as the bitmap is not auto-loaded... >> But we do have a flag for exactly that. So it seems to me that a >> manipulated image can easily consume huge amounts of RAM on the host. >> >> So I think we also need some sane limitation on the in-RAM size of a >> bitmap (which is BME_MAX_TABLE_SIZE * cluster_size, as far as I >> understand). The question of course is, what is sane? For a server >> system with no image manipulation possible from the outside, 1 GB may be >> completely fine. But imagine you download some qcow2 image to your >> laptop. Then, 1 GB may not be fine, actually. >> >> Maybe it would make sense to use a runtime-adjustable limit here? > > Actualy BME_MAX_PHYS_SIZE is this limit: > in check_constraints we have > > uint64_t phys_bitmap_bytes = > (uint64_t)h->bitmap_table_size * s->cluster_size; > > ... > > (phys_bitmap_bytes > BME_MAX_PHYS_SIZE) ||
Advertising
OK, so BME_MAX_PHYS_SIZE is actually supposed to be the limit of the size of the bitmaps in RAM? And I suppose it is going to be calculated differently in the future once qemu has sparse bitmap support? My fault, then, I thought BME_MAX_PHYS_SIZE was supposed to be the limit of the size on disk. OK, makes sense then, but the question whether a runtime-adjustable limit would make sense still remains. OTOH, this is something that can always be added later on. Max
signature.asc
Description: OpenPGP digital signature | https://www.mail-archive.com/qemu-devel@nongnu.org/msg405143.html | CC-MAIN-2016-50 | en | refinedweb |
Epetra_SerialDenseSolver: A class for solving dense linear problems. More...
#include <Epetra_SerialDenseSolver.h>
Epetra_SerialDenseSolver: A class for solving dense linear problems..
When you should use Epetra_SerialDenseSolver: If you want to (or potentially want to) solve ill-conditioned problems or want to work with a more object-oriented interface, you should probably use Epetra_SerialDenseSolver... | http://trilinos.sandia.gov/packages/docs/r10.4/packages/epetra/doc/html/classEpetra__SerialDenseSolver.html | CC-MAIN-2014-35 | en | refinedweb |
Let’s see Pyramid Layout in action with the demo application provided in demo.
Normal Pyramid stuff:
Now let’s look at some of the code.
Pyramid Layout defines configuration directives and decorators you can use in your project. We need those loaded into our code. The demo does this in the etc/development.ini file:
pyramid.includes = pyramid_debugtoolbar mako.directories = demo:templates
The development.ini entry point starts in demo/__init__.py:
This is all Configurator action. We register a route for each view. We then scan our demo/layouts.py, demo/panels.py, and demo/views.py for registrations.
Let’s start with the big picture: the global look-and-feel via a layout:
The @layout_config decorator comes from Pyramid Layout and allows us to define and register a:
<title>${layout.project_title}, from Pylons Project</title>
Here we see an important concept and some important magic: the template has a top-level variable layout available. This is an instance of your layout class.
For the ZPT crowd, if you look at the master template in demo/templates/layouts/layout.pt, you might notice something weird at the top: there’s no metal:define-macro. Since Chameleon allows a template to be a top-level macro, Pyramid Layout automatically binds the entire template to the macro named main_template.
How does your view know to use a layout? Let’s take a look.
Our demo app has a very simple set of views:
We again have one callable with 3 stacked decorators. The decorators are all normal Pyramid @view_config stuff.
The second one points at a Chameleon template in demo/templates/home.pt:
<metal:block <div metal: <!-- Main hero unit for a primary marketing message or call to action --> ${panel('hero', title='Chameleon')} <!-- Example row of columns --> <div class="row"> <p>${panel('headings')}</p> </div> <div class="row"> <p>${panel('contextual_panels')}</p> </div> <div class="row"> <h2>User Info</h2> <p>${panel('usermenu', user_info={ 'first_name': 'Jane', 'last_name': 'Doe', 'username': 'jdoe'} )}</p> </div> </div> </metal:block>
The first line is the one that opts the template into the layout. In home.jinja2 that line looks like:
{% extends main_template %}
For both of these, main_template is inserted by Pyramid Layout, via a Pyramid renderer global, into the template’s global namespace. After that, it’s normal semantics for that template language.
Back to views.py. The view function grabs the Layout Manager, which Pyramid Layout conveniently stashes on the request. The LayoutManager‘s primary job is getting/setting the current layout. Which, of course, we do in this function.
Our function then grabs the layout instance and manipulates some state that is needed in the global look and feel. This, of course, could have been done in our AppLayout class, but in some cases, different views have different values for the headings.
Our main template has something interesting in it:
<body> ${panel('navbar')} <div class="container"> ${next.body()} <hr> <footer> ${panel('footer')} </footer> </div> <!-- /container --> <!-- Le javascript ================================================== --> <!-- Placed at the end of the document so the pages load faster --> <script src="${request.static_url('demo:static/js/jquery-1.8.0.min.js')}"></script> <script src="${request.static_url('demo:static/js/bootstrap.min.js')}"></script> </body>
Here we break our global layout into reusable parts via panels. Where do these come from? @panel_config decorators, as shown in panels.py. For example, this:
${panel('navbar')}
...comes from this:
@panel_config( name='navbar', renderer='demo:templates/panels/navbar.mako' ) def navbar(context, request): def nav_item(name, url): active = request.current_route_url() == url item = dict( name=name, url=url, active=active ) return item nav = [ nav_item('Mako', request.route_url('home.mako')), nav_item('Chameleon', request.route_url('home.chameleon')), nav_item('Jinja2', request.route_url('home.jinja2')) ] return { 'title': 'Demo App',
The @panel_config registered a panel under the name navbar, which our template could then use or override.
The home.mako view template has a more interesting panel:
${panel('hero', title='Mako')}
...which calls:
This shows that a panel can be parameterized and used in different places in different ways. | http://docs.pylonsproject.org/projects/pyramid-layout/en/latest/demo.html | CC-MAIN-2014-35 | en | refinedweb |
So You Think You Can Polymorph?
Now you may already think you understand polymorphism—and perhaps you do—but I’ve found that most software developers don’t actually understand exactly what polymorphism is.
What is polymorphism?
How many times have you been asked this question during a job interview?
Do you actually know confidently what the right answer is?
Don’t worry, if you are like most developers out there in the world you probably have this feeling that you know what polymorphism is, but are unable to give a clear and concise definition of it.
Most developers understand examples of polymorphism or one particular type of polymorphism, but don’t understand the concept itself.
Allow me to clarify a bit.
What I mean by this is that many times when I ask about polymorphism in an interview, I get a response in the form of an example:
Most commonly a developer will describe how a shape base class can have a circle derived class and a square derived class and when you call the draw method on a reference to the shape base class, the correct derived class implementation of draw is called without you specifically having to know the type.
While this is technically a correct example of runtime polymorphism, it is not in any way concise, nor is it a definition of the actual term.
I myself have described polymorphism in a similar fashion in plenty of job interviews.
True understanding
The problem with just that example as an explanation is that it lacks true understanding of the concept.
It is like being able to read by memorizing words, while not understanding the concepts of phonetics that underlie the true concept of reading.
A good test for understanding a concept is the ability to create a good analogy for that concept.
Oftentimes if a person cannot come up with an analogy to describe a concept, it is because they lack the true understanding of what the concept is.
Analogies are also an excellent way to teach concepts by relating things to another thing that is already understood.
If right now you can’t come up with a real world analogy of polymorphism, don’t worry you are not alone.
A basic definition
Now that we understand why most of us don’t truly understand polymorphism, let’s start with a very basic concise definition.
Polymorphism is sharing a common interface for multiple types, but having different implementations for different types.
This basically means that in any situation where you have the same interface for something but can have different behavior based on the type, you have polymorphism.
Think about a Blu-ray player.
When you put a regular DVD in the player what happens?
How about when you put a Blu-ray disc in the player?
The interface of the player is the same for both types of media, but the behavior is different. Internally, there is a different implementation of the action of playing a disc depending on what the type is.
How about a vending machine?
Have you ever put change into a vending machine?
You probably put coins of various denominations or types in the same slot in the machine, but the behavior of the machine was different depending on the type.
If you put a quarter in the machine it registers 25 cents. If you put in a dime it registers 10 cents.
And that is it, you now understand the actual concept of polymorphism.
Want to make sure you don’t forget it? Try coming up with a few of your own real world analogies or examples of polymorphism.
Bringing it back to code
In code polymorphism can be exhibited in many different ways.
Most developers are familiar with runtime polymorphism that is common in many OO languages like C#, Java and C++, but many other kinds of polymorphism exist.
Consider method overloading.
If I create two methods with the same name, but they only differ in type, I have polymorphic behavior.
The interface for calling the method will be the same, but the type will determine which method actually gets called.
Add(int a, int b) Add(decimal a, decimal b)
You might be shaking your head “no” thinking that this is not polymorphism, but give me the benefit of the doubt for a moment.
The most common argument against this example as polymorphism is that when you write this code the method that is going to be called is known at compile time.
While this is indeed true for statically typed and compiled languages, it is not true for all languages.
Consider Add being a message instead of a method.
What I mean by this is that if you consider that the actual determination of the method that is called in this situation could be differed until runtime, we would have a very similar situation to the common shape example. (Late binding)
In many languages this is what happens. In Objective-C or Smalltalk for example, messages are actually passed between objects and the receiver of the message determines what to do at runtime.
The point here is that polymorphism can be done at compile time or during execution, it doesn’t really matter.
Other polymorphic examples in code
Since the intent of this post is not to classify and explain each type of polymorphism that exists in code, but rather to provide a simplified understanding of the general concept, I won’t go into a detailed explanation of all the kinds of polymorphism we see in code today. Instead I’ll give you a list of some common examples that you may not have realized were actually polymorphic.
- Operator overloading (similar to method overloading.)
- Generics and template programming. (Here you are reusing source code, but actual machine code executed by the computer is different for different types.)
- Preprocessing (macros in C and C++)
- Type conversions
Why understanding polymorphism is important
I may be wrong, but I predict that more and more development will move away from traditional OO as we tend to find other ways of modularizing code that is not so rooted in the concept of class hierarchies.
Part of making the transition requires understanding polymorphism as a general purpose and useful computer science concept rather than a very situational OO technique.
Regardless, I think you’ll agree that is it nice to be able to describe polymorphism itself rather than having to cite the commonly overused example of shapes.
(Note: Opinions expressed in this article and its replies are the opinions of their respective authors and not those of DZone, Inc.)
Barry Smith replied on Tue, 2013/04/16 - 8:45am
Why not make it even simpler? Polymorphism is just a neat way of doing repeated switch statements on a type variable:
e.g, in Java-ish:
public class Animal {
private int type;
public void speak() {
switch(type) {
case DOG: println("woof"); break;
case CAT: println("meow"); break;
}}
...etc. Polymorphism is basically just transforming the switches into implementations.
John J. Franey replied on Tue, 2013/04/16 - 10:12am
You didn't really help clear up understanding.
You munged together the concepts of overriding and polymorphism. You also attempted to distinguish polymorphism in weakly typed languages which is difficult because every method call is polymorphic by definition.
Quite simply, 'polymorph' is 'many-form'. I mean, if you go back to the greek, 'morph' is a noun not a verb, meaning 'form' or 'shape', and 'poly' means 'many'.
In OO programming, it means a method can have many (poly) definitions (morphs), where the definition invoked at runtime is determined when the method is called. How confusing is that?
John Sonmez replied on Tue, 2013/04/16 - 2:49pm
in response to:
John J. Franey
This is a common misconception about polymorphism.
Overriding is a form of polymorphism, as is generic programming.
You are talking only about subtype polymorphism.
Here is a wikipedia article that explains some of this:
Thanks for bringing this up though, it is certainly a point of confusion.
John J. Franey replied on Tue, 2013/04/16 - 3:47pm
in response to:
John Sonmez
A cat is form of animal, it is not a misconception to differentiate cats from animals. Overriding is a form of polymorphism, it is not a misconception to differentiate overriding from polymorphism. The differentiation clarifies.
Stephen Lindsey replied on Wed, 2013/04/17 - 5:34am
Just what, exactly, is the point of the large image at the top of this article. I would suggest that most people read these articles at work and it's not good when ones screen is dominated by such an image.
It's unnecessary, childish and unprofessional, you're by far not the only one who does this; you're just the one I picked to make the point.
Lund Wolfe replied on Sun, 2013/04/21 - 7:13pm
Polymorphism (in programming) does imply differences in behavior (methods) of derived classes. Otherwise, there is no reason to have derived classes.
The simple explanation of writing the code once for the usage of the super class is all that really matters from the developer's point of view. Technically, the derived types are created at run time and dynamically bound (and accessed accordingly).
Along with predicting the end of OO, I think the brain is overrated ;-)
Brad Appleton replied on Wed, 2013/04/24 - 6:02pm
I like the way I learned it in SmallTalk better -- "Polymorphism is the ability to send any message to any object capable of understanding it". This covers inclusion polymorphism, parametric polymorphism, and ad-hoc polymorphism. It even covers the seeming exception (tho not really) to subtype polymorphism known as "delegates" in C# (and which was known as "Signatures" in g++ back in the early 90s).
I often like to describe polymorphism (and its different types) based on upon what varies and what stays the same for each kind of polymorphism:
1. using the same method name for the same type (interface) to invoke differing implementations ==> subtype polymorphism
2. using the same method name for different types to invoke the same implementation ==> parametric polymorphism (generics/templates)
3. using the same method name for (same or different) types to invoke different method signatures ==> ad-hoc polymorphsm | http://java.dzone.com/articles/so-you-think-you-can-polymorph | CC-MAIN-2014-35 | en | refinedweb |
Epetra_OskiMultiVector: A class for constructing and using dense Oski multi-vectors on a single processor or a single core of a multi-processor. More...
#include <Epetra_OskiMultiVector.h>
Epetra_OskiMultiVector: A class for constructing and using dense Oski multi-vectors on a single processor or a single core of a multi-processor..
Definition at line 66 of file Epetra_OskiMultiVector.h.
Copy constructor.
Constructor creates and Epetra_OskiMultiVector from an Epetra_MultiVector.
Destructor.
Returns true if a deep copy of the multi-vector was created by the constructor.
Returns the Oski portion of the Multi-Vector.
Returns the Epetra portion of the Multi-Vector.
Reimplemented in Epetra_OskiVector.
Sets this equal to Source.
Reimplemented in Epetra_OskiVector.
Definition at line 108 of file Epetra_OskiMultiVector.h.
Definition at line 109 of file Epetra_OskiMultiVector.h.
Definition at line 110 of file Epetra_OskiMultiVector.h. | http://trilinos.sandia.gov/packages/docs/r10.8/packages/epetra/browser/doc/html/classEpetra__OskiMultiVector.html | CC-MAIN-2014-35 | en | refinedweb |
Computer Science Archive: Questions from April 07, 2008
- Anonymous askedI am writing this program for java, but I would like to havereference to another incase I need help.... Show more
I am writing this program for java, but I would like to havereference to another incase I need help. Thanks for the help!
Details
To approximate the square root of a positive number n usingNewton's method, you need to make an initial guess at the root andthen refine this guess until it is "close enough." Your firstinitial approximation should be root = 1;. Asequence of approximations is generated by computing theaverage of root and n/root.
Use the constant:
private static final double EPSILON = .00001;
Your loop for findSquareRoot should behave likethis:
make the initial guess for root
while ( EPSILON < absolute value of the differencebetween root squared and n )
calculate a new root
return root
Your class should have a constructor that takes the number youwant to find the square root of. Implement the usual accessormethod(s) and a findSquareRoot method that uses Newton'smethod described above to find and return the square root of thenumber. Add a method setNumber that takes a newnumber that replaces the number in the instance field. Supplya toString method that returns a string containing thenumber and the square root of the number.
Your test class should:3 answers
- Anonymous asked2 answers
- Anonymous asked3 answers
- Anonymous asked2 answers
- Anonymous askedWe have been usingPowerPC eieio(Enforce-In-Order Execution of I/O) assembly languageinstruction... Show more
Hi,We have been usingPowerPC eieio(Enforce-In-Order Execution of I/O) assembly languageinstruction on Freescale MPC500 Family Microcontrollers.
Can we use this instruction on Freescale MPC5500 FamilyMicrocontrollers, for example MPC5566? If yes, do we need tomodify any registers to make this instructionwork?
thanks.• Show less1 answer
- Praggy asked1 answer
- Anonymous asked1. What is theoutput of t... Show more
vector<int> v(10, 5);
v.push_back(7);
cout <<v.front() << " " << v.back();
1. What is theoutput of the code fragment above?
a. 5, 7
b. 5, 10
c. 7, 5d. 7, 10
vector<int> v;
v.push_back (3);
cout << v[0];
2. What is the output of the codeabove?
a. 0
b. 1
c. 2d. 3
vector<int> v (1,1);
v.push_back (2);v.push_back (3); v.push_back (4);
vector<int>::iterator i =v.begin();
vector<int>::iterator j = i + 2; cout<< *j << " ";
i += 3; cout<< *i << " ";
j = i - 1; cout<< *j << " ";
j -=2;
cout << *j<< " ";
cout <<v[1] << endl; //output 1
(j < i) ?cout << "j < i" : cout << "not (j < i)"; cout<< endl; // output 2
(j > i) ?cout << "j > i" : cout << "not (j > i)"; cout<< endl; //output 3
i =j;
i <= j&& j <= i ? cout << "i and j equal" : cout<< "i and j not equal"; cout
<< endl;//output 4
j =v.begin();
i =v.end();
cout <<"iterator distance end - begin =^ size: " << (i - j);//output 5
3. What is theoutput at the line labeled output 3?
a. j >i
b. not(j >i)
c. j <i
d. not(j <i)1 answer
- Anonymous askedeach object of the class bookType can hold the followinginformation about a book:title,up to fouraut... Show more
Each object of the class bookType can hold the following information about a book: title, up to four authors, publisher, ISBN, price, and number of copies in stock. To keep track of the number of authors, add another member variable• Show less
- Anonymous askedfind an optimal parenthesization of a matrix-chain product whose sequence of dimensions is <10,20... More »1 answer
- Anonymous asked0 answers
- Anonymous askeddraw the recursion tree for the MERGE-SORT procedure fromsection 2.3.1 on an array of 16 elements.Ex... Show more
Draw the recursion tree for the MERGE-SORT procedure from section 2.3.1 on an array of 16 elements. Explain why memoization is ineffective in speeding up a good divide-and-conquer algorithm such as MERGE-SORT.• Show less
- Anonymous asked1.The time complexityof the function isEmpty in an array list is O(1). a. True b. False 2. Every obj2 answers
- Anonymous asked1 answer
- timeiz2short askedThis program is suppose to ask theuser to enter the amount of a purchase. Then it computes thecount... Show moreThis program is suppose to ask theuser to enter the amount of a purchase. Then it computes thecounty tax and the state tax and then the total tax as well astotal sale. So why it won't run I have no idea but thisproject is long overdue and if there is anyone that can help me Iwould really appreciate because I really want to understand how towork with this to keep up with my class which is hard enough sinceI can't hear. So please look at the program below and tell mewhat's wrong it.import java.util.Scanner;• Show less
/**
 * Sales Tax Program
 *
 * Asks the user for a purchase amount, then computes and displays the
 * state sales tax, the county sales tax, the combined tax, and the
 * total of the sale (purchase + total tax).
 *
 * Fixes to the original version:
 *  - removed the stray keyboard.nextDouble() calls that followed each
 *    print statement; they made the program block waiting for input it
 *    never asked for (this is why it appeared not to run)
 *  - every output value is now labeled
 *  - Scanner is referenced by its fully-qualified name so this class
 *    compiles even without an "import java.util.Scanner;" line
 */
public class SalesTax
{
    // Entry point: the JVM calls main() when the program starts.
    public static void main(String[] args)
    {
        final double countyTaxRate = 0.02; // 2% county sales tax
        final double stateTaxRate = 0.04;  // 4% state sales tax

        java.util.Scanner keyboard = new java.util.Scanner(System.in);

        System.out.print("Enter the amount of your purchase: ");
        double purchase = keyboard.nextDouble();

        // Compute each tax once, then derive the totals from them.
        double stateTax = purchase * stateTaxRate;
        double countyTax = purchase * countyTaxRate;
        double totalTax = stateTax + countyTax;
        double totalSale = purchase + totalTax;

        System.out.println("State tax:  " + stateTax);
        System.out.println("County tax: " + countyTax);
        System.out.println("Total tax:  " + totalTax);
        System.out.println("Total sale: " + totalSale);
    }
}
- Anonymous askedHi, I need help writing a program. I 'm not a computer major,but I have to take this class for gradu... Show moreHi, I need help writing a program. I 'm not a computer major,but I have to take this class for graduating requirements. Ihave never done programming in my life and this is my firstprogram assignment and I was wondering if someone can help me startit and give me adivce on how to continue it or end it.Problem: Transported back in time (with your computer) you arein charge of harbor defenses for your city. Using the recentlydevelped length standard, the meter, the cannon master wants to nowif a shot fired at a given velocity from his cliff-mountedcannon will hit an enemy ship entering the harbor. In addition tothe velocity, he gives you the height of the cliff, the distancefrom the ship, and the width of the ship. You decide to amaze thegeneral by develping a program to solve the problem.You can assume that the cannon is mounted horizontally. theship distance is given from the end of the cannon to the middle ofthe ship.Picture:to make the problem easier, this is what I did and what myteacher also prefers:To determine the distance of the shot you must dermine howlong it takes the cannon ball to fall from the height of the cliff.the initial downward velocity is zero. Use 9.8 m/s to representgravity.Note: I also have to make note of the units when Iwrite my program, when i plug in numbers, i have to come out withsomething meters. I also need help with that.The equation my teacher provided us is with is:then, the distane of the shot=velocity*timeSome specification of the program is that all numericaloutputs should be done using %f flag, the all calculations shouldmaintain as much precision as is supported by the C double type inthe gcc compiler. 
Assume all inputs will be greater thanzero.This is how I started mine:/* calculate the distance of the cannon ball */#include <stdio.h>#define GRAVITY 9.8int main(void){double height >0double width > 0double velocity>0doubleit right?2) on my paper, my teacher gave us an example of asample run:enter the height of the cliff (meters):50enter the distance of the ship from the cliff(meters):180Enter the width of the ship(meters):15Enter the velocity of he cannon ball(meters/second):Distance of ball: 175.691055metersDirect hit!!!!!!So after i write my program, i'm guessing my screenwill haveenter the...enter the...enter the...then the teacher plugs in numbers and solves theproblemsBut on the formula given, it doesn't include all widthof the ship. should i put all this variable like the width in myprogram still?When my teacher tests each program: he's going to havea set of numbers and he going to type in this number and pressenter and it's going to tell him the distance.so how would i do this. please help. I don't want tofail this class.• Show less5 answers
- Anonymous askedWrite a program that declares three one-dimensional arrays namedprice, quantity and amount. Each arr... Show moreWrite a program that declares three one-dimensional arrays namedprice, quantity and amount. Each array should be declared inmain(). and should be capable of holding 10 double-precisionnumbers. The numbers that should be stored in price are 10.62,14.89, 13.21, 16.55, 18.62, 9.47, 6.58, 18.32, 12.15, 3.98. Thenumbers that should be stored in quantity are 4, 8.5, 6, 8.35, 9,15.3, 3, 5.4, 2.9, 4.8. Your program should pass these arrays to afunction called extend (), which should calculate the elemnts inthe amount array as the product of the equivalent element in theprice and quantity arrays.
After extend has put values into the amount array, the values inthe array should be displayed from within main.
• Show less1 answer
- Anonymous askedcha... Show more
Consider thefollowing lines of code:struct studentType
{char name[27];double gpa;int sID;char grade;};int *p;double *q;char *chPtr;studentType *stdPtr;1. The statement“p++” increments the value of p by ____ bytes
a. 1
b. 2
c.4
d. 8
2. The statement“stdPtr++” increments the value of stdPtr by____
bytes.
a. 10
b. 20
c. 40
d. 80
I answered C for both. Is that correct?• Show less1 answer
- Anonymous asked1 answer
- Anonymous askedWrite a Program to test the various opera... Show morePlease assist me with this program. This is my 2ndattempt.Write a Program to test the various operations of the classclockType.Thank you• Show less1 answer
- Anonymous askedIn Java a final class must be sub-cl... Show moreAccording to java conventions Please Help me solving theseMCQs.
In Java a final class must be sub-classed before it.
?? True
?? False
A top level class without any modifier is accessible to
?? any class
?? any class within the same package
?? any class within the same file
?? any subclass of this class
A top level class may have only the following accessmodifier.
?? Package
?? Private
?? Protected
?? Public
In Java an abstract class cannot be sub-classed
?? True
?? False
Given a one dimensional array arr, what is the correct way ofgetting the number of elements in arr
?? arr.length
?? arr.length – 1
?? arr.size – 1
?? arr.length()
When recursive method is called to solve a problem, the methodactually is capable of solving
only the simpler case(s), or base case(s).
?? True
?? False
Which of these are valid declarations for the main method?
?? public void main();
?? public static void main(String args[]);
?? static public void main(String);
?? public static int main(String args[]);
Map interface is derived from the Collection interface.
?? True
?? False
In Java, which of these classes implement the LayoutManagerinterface?
?? RowLayout
?? ColumnLayout
?? GridBagLayout
?? FlowLayoutManager
BorderLayout is the default layout manager for a JFrame’scontent pane
?? True
?? False• Show less2 answers
- Anonymous askedCode the base class Member. Your class must include accessor/mutator properties or metho... Show morex.Code the base class Member. Your class must include accessor/mutator properties or methods for each state variable and at least one constructor. The virtual method Exercise( ) will calculate and return a double representing the pounds lost by the member based on the following formula:
poundsLost = (minutesOfExercise*0.03024*currentWeight)/3500
Code the derived class Hiker. Your class must include an accessor/mutator property for the additional data member and at least one constructor. It must contain a polymorphic implementation of the Exercise() method using the following formula:
poundsLost = (minutesOfExercise*0.03024*currentWeight)/3500
Code a Main method to test the methods defined in the Member and Hiker classes. Specifically, at least one object MUST be instantiated from each class. Use the objects to test the method Exercise() in both classes. Note: you do not need to test the functionality of the other components in your classes.9Ïmx.õ..õx.øi5 • Show lessHere is what I have so far. I am having a few issues when I debug
using System;
using System.Collections.Generic;
using System.Text;
// Lab 6: weight-loss polymorphism exercise.
// Fixes to the original: Hiker and Test are no longer accidentally nested
// inside Member (misplaced braces); Member now exposes the minutesOfExercise
// state that Main used; Exercise() returns the pounds lost as a double (as
// the assignment requires) instead of void; and the invalid statement
// "myHiker = (minutesOfExercise * 0.03024 * currentWeight) / 3500;" is
// replaced by a call to the Hiker's own Exercise() method.
namespace Lab6
{
    /// <summary>
    /// Base class for a gym member. Tracks the member's current weight and
    /// minutes of exercise; Exercise() returns the pounds lost using
    /// poundsLost = (minutesOfExercise * 0.03024 * currentWeight) / 3500.
    /// </summary>
    class Member
    {
        protected double currentweight;     // weight in pounds
        protected double minutesofexercise; // minutes exercised

        public Member() { }

        public Member(double weight, double minutes)
        {
            currentweight = weight;
            minutesofexercise = minutes;
        }

        public double currentWeight
        {
            get { return currentweight; }
            set { currentweight = value; }
        }

        public double minutesOfExercise
        {
            get { return minutesofexercise; }
            set { minutesofexercise = value; }
        }

        /// <summary>Pounds lost for the recorded minutes of exercise.</summary>
        public virtual double Exercise()
        {
            return (minutesofexercise * 0.03024 * currentweight) / 3500;
        }
    }

    /// <summary>
    /// A hiking member. Adds one extra data member (milesHiked) as the
    /// assignment requires and overrides Exercise() polymorphically.
    /// NOTE(review): the assignment text quotes the identical formula for
    /// both classes, so the override applies the same calculation.
    /// </summary>
    class Hiker : Member
    {
        private double milesHiked; // additional data member required by the spec

        public Hiker() { }

        public Hiker(double weight, double minutes, double miles)
            : base(weight, minutes)
        {
            milesHiked = miles;
        }

        public double MilesHiked
        {
            get { return milesHiked; }
            set { milesHiked = value; }
        }

        public override double Exercise()
        {
            // Same formula as Member.Exercise(), per the quoted spec.
            return (minutesOfExercise * 0.03024 * currentWeight) / 3500;
        }
    }

    /// <summary>Instantiates one object of each class and tests Exercise().</summary>
    class Test
    {
        static void Main(string[] args)
        {
            Member myMember = new Member();
            myMember.currentWeight = 170;
            myMember.minutesOfExercise = 60;
            Console.WriteLine("Member lost {0} pounds.", myMember.Exercise());

            Hiker myHiker = new Hiker(180, 90, 5);
            Console.WriteLine("Hiker lost {0} pounds.", myHiker.Exercise());

            Console.ReadLine();
        }
    }
}
- Anonymous asked1 answer
- Anonymous asked0 answers | http://www.chegg.com/homework-help/questions-and-answers/computer-science-archive-2008-april-07 | CC-MAIN-2014-35 | en | refinedweb |
This mini-package provides a simple class, Block, that can serve as a fixed-size container (in the STL sense) and that can also serve as a replacement for a native C++ array. module provides a fixed-size C++ container class that fully supports all operations needed to cooperate smoothly with the standard C++ library algorithms, yet supplies all the operations and benefits of a fixed-size native C++ array.
To use the Block facilities, a user simply includes the header file:
#include "ZMtools/Block.h"
Subsequently, the user may instantiate and employ one or more Blocks, as in the following simple example:
Block< int, 20 > a; ... a[0] = 16; ... std::cout << a.size();
A test program that validates the behavior of the Block<> template is available in the program zmtBlockTest.cc, furnished as part of this module.
The sole header file needed by the user is ZMtools/Block.h. This file contains the complete implementation in the form of a template class, Block<>, together with a specialization of this template that handles the case of a zero-sized container.
This class was adapted from Matthew Austern's block class as published in Generic Programming and the STL, ISBN 0-201-30956-4, and a similar class, c_array, published by Bjarne Stroustrup in The C++ Programming Language, 3rd Edition, ISBN 0-201-88954-4. The specialization for the case of a zero-length container was provided by Marc Paterno. Walter Brown edited the result for conformance to Zoom standards. | http://www.fnal.gov/docs/working-groups/fpcltf/Pkg/ZMtools/doc/html/0Block.html | CC-MAIN-2014-35 | en | refinedweb |
Calling inner classes?
Rob Brew
Ranch Hand
Joined: Jun 23, 2011
Posts: 99
posted
Jul 09, 2011 10:01:53
0
I'm following the oracle training guide as best i can, trying to play with inner classes though and i'm lost. I'm having problems at line 47. how do i call and use innner classes?
Thanks for all your help guys, it means a lot.
vehicle.java
// Base class for all vehicles.  (Note: Java convention is PascalCase for
// class names -- Vehicle, Car -- but the original lowercase names are kept
// so taxi.java and cab.java still compile against this file unchanged.)
abstract class vehicle {
    int date_month = 11;
    String colour = "";
    int price = 0;
    static int id = 0; // counts every vehicle ever constructed

    public vehicle() {
        colour = "blue";
        id++;
    }

    public vehicle(int p) {
        price = p;
        id++;
    }

    public void set_colour(String c) {
        colour = c;
    }

    String get_colour() {
        return colour;
    }

    void set_price(int p) {
        price = p;
    }

    int get_price() {
        return price;
    }

    static int vehicleId() {
        return id;
    }

    public static void main(String args[]) {
        car o = new car(1445, "blue");
        System.out.println(o.toString());
        System.out.println(o.get_price());
        System.out.println(o.get_colour());
        taxi t = new taxi();
        System.out.println("Number of cars = " + vehicle.vehicleId());
        System.out.println("Rate "+ (new taxi().getRate(5)));
    }
}

class car extends vehicle {
    boolean in_showroom = false;
    int mileage = 0;
    int price; // hides vehicle.price -- kept as in the original

    public car() {
        super();
    }

    public car(int p) {
        super(p);
        // FIX: Speedo is a non-static inner class of taxi, so an instance of
        // the enclosing taxi must exist before a Speedo can be created.
        // "new taxi.Speedo()" does not compile; the qualified form does.
        taxi.Speedo speed = new taxi().new Speedo();
    }

    public car(int p, String s) {
        super(p);
        super.colour = "green"; // note: the s argument is ignored here
        System.out.println(super.colour);
    }

    void drive() {
        System.out.println("beep beep");
    }

    boolean test_drive() {
        if (in_showroom == true) {
            System.out.println("nice driving");
            return true;
        } else {
            return false;
        }
    }
}
taxi.java
/**
 * A bookable taxi: extends vehicle and fulfils the cab contract.
 */
public class taxi extends vehicle implements cab {
    boolean booked;
    int month_serviced;
    int rate = 5;

    /** Default taxi: immediately books itself. */
    public taxi() {
        booked = false;
        month_serviced = 4;
        this.book();
        System.out.println("Taxi booked");
    }

    /** Taxi with a custom hourly rate. */
    public taxi(int r) {
        rate = r;
        System.out.println("booked at " + rate + "per hour");
    }

    /** Fare for the given number of hours. */
    public int getRate(int hours) {
        return rate * hours;
    }

    /** Book the taxi unless it is already taken; returns the booked state. */
    public boolean book() {
        if (!booked) {
            booked = true;
            System.out.println("where to governor?");
        } else {
            System.out.println("already booked");
        }
        return booked;
    }

    /** Service check. */
    public void service() {
        System.out.println("MOT passed");
    }

    /** Inner (non-static) class: a speedometer belonging to one taxi. */
    class Speedo {
        public int speed;

        public int getSpeed() {
            return speed;
        }
    }
}
cab.java
/**
 * Contract for any vehicle that can be hired as a cab.
 */
public interface cab {
    /** Try to book the cab; returns whether it is now booked. */
    public boolean book();

    /** Carry out the cab's service check. */
    public void service();
}
marc weber
Sheriff
Joined: Aug 31, 2004
Posts: 11343
I like...
posted
Jul 09, 2011 17:11:19
0
Rob Brew wrote:
... I'm having problems at line 47. how do i call and use innner classes? ...
I think your problem is actually at line 64.
An instance of an inner class (Speedo) always needs to be associated with an instance of the enclosing class (taxi). This means you need an instance of taxi before you can create an instance of Speedo. So instead of just...
taxi.Speedo speed = taxi.new Speedo();
...create a new instance of taxi first, then use that instance to create a new Speedo...
taxi.Speedo speed = new taxi().new Speedo();
Edit: Note that if you already have an instance of the enclosing class, then you could simply use that. For example...
taxi myTaxi = new taxi(); taxi.Speedo mySpeedo = myTaxi.new Speedo();
"We're kind of on the level of crossword puzzle writers... And no one ever goes to them and gives them an award."
~Joe Strummer
sscce.org
I agree. Here's the link:
subject: Calling inner classes?
Similar Threads
overloading - basics
help needed with inheritance
static int not incrementing, constructor not called.
Dynamic Method Dispatch
variables in inteface
All times are in JavaRanch time: GMT-6 in summer, GMT-7 in winter
JForum
|
Paul Wheaton | http://www.coderanch.com/t/544761/java/java/Calling-classes | CC-MAIN-2014-35 | en | refinedweb |
#include <paradox.h>
char * PX_time2string(pxdoc_t *pxdoc, long time, const char *format)
Creates a string representation of a paradox time as specified in the parameter format. Any valid time will produce reasonable results.
Returns pointer to string on success and NULL on failure.
PX_timestamp2string(3), PX_date2string(3)
This manual page was written by Uwe Steinmann uwe@steinmann.cx. | http://www.makelinux.net/man/3/P/PX_time2string | CC-MAIN-2014-35 | en | refinedweb |
Store and multiple filters
Store and multiple filters
Hi,
i have a Ext.Store with static data. To filter with one param is no problem, but how can I apply multiple filters?
In API Store.filter is defined as (filter,value) so i don't no how to apply more like
company:ba,city:be etc.
Why not to use Store::filterBy()?Use the force - read the source.
- Join Date
- Mar 2007
- Location
- Notts/Redwood City
- 30,498
- Vote Rating
- 46
Yes, the documentation is totally clear:
2 params:
Parameters:
* fn : Function
The function to be called, it will receive 2 args (record, id)
* scope : Object
(optional) The scope of the function (defaults to this)
Filter by a function. The specified function will be called with each record in this data source. If the function returns true the record is included, otherwise it is filtered.
Where's the problem?
- Join Date
- Mar 2007
- Location
- Notts/Redwood City
- 30,498
- Vote Rating
- 46
That's a direct quote from the documentation.
I'd like to preface this by saying that I am an EXT noob... and have really only been using the library for a few days now.
That being said, I am also looking to apply multiple filters, but I was hoping to apply them in stages. ie. User does a search for something and the data is filtered, then the user does a second search on the already filtered data.
I was hoping that you could simply do subsequent filterBy calls on the data store, but this appears to not be the case. My second filterBy seemingly searches ALL records [even ones previously filtered out].
Is there a way to do this?
Thanks!
I would be interested in this too.
I would be interested in this too.
ThanksSteven Benjamin
Senior Coldfusion Developer
filterBy not working but filter works!!
filterBy not working but filter works!!
Following code:
var store = Ext.data.StoreManager.get("SampleStore");
store.filterBy(function (record) //scope is optional so I dint use it.
{
return (record.get("Name") == "Jack");
});
not working but
store.filter("Name","Jack");
works!!
Please help. | http://www.sencha.com/forum/showthread.php?12185-Store-and-multiple-filters&p=904951&viewfull=1 | CC-MAIN-2014-35 | en | refinedweb |
Type: Posts; User: opedog
The main content of my app is a custom TabControl that I've re-templated and styled the heck out of. One of the design decisions of this application is that it should have no modal windows (the...
VS2008, C#
In several of my business objects, I'm lazily loading some data that requires a call to the DB. This presents a problem when trying to quick watch the parent object. When it hits that...
I'm curious as to why you can't use a label. I only use a textbox as a label for 2 circumstances:
1) I need to a readonly display that will still allow the user to copy the data from the control....
Hey thanks, I was wondering this myself a few days ago. :)
I don't have many specifics as I've yet to delve into this a lot yet, but research the VirtualMode property on the DataGridView and see how you can use it. VirtualMode only renders the rows the user...
Heya. :)
I've got a quirky thing happening with databinding in C#, .NET 2.0, CSLA 2.something, and my databinding.
The user hits a button, i popup a modal, populate some comboboxes on the...
You may want to explicitly tell your SqlCommand object that the CommandType is a StoredProcedure:
nonqueryCommand.CommandType = CommandType.StoredProcedure;
I'm not sure what the default is...
Self reply inc!
Seems this individual has the same problem and essentially the same workaround.
One of the comments:
So I'm playing with a screen in our system. On the screen are a few comboboxes that we have databound to a list of stuff from a cache. Dictionary type stuff that we don't have to go to the database...
Replace the catch bit of my example with this:
catch (Exception ex)
{
Console.WriteLine(ex.Message);
}
This is also permissable:
Yeah, yeah...
try
{
if (x < 0)
throw new Exception("Parameter out of bounds.");
}
catch
{
Console.WriteLine("");
}
Your signature doesn't display for me.
Quick google search revealed this:
int i = 100;
string str = Convert.ToString(i, base); //base = 2, 8,10 or 16
You'll obviously want to use base 2 for binary.
In order to maximize the window of the .exe, you'd somehow have to match up the Process with the window handle. Unfortunately I'm not sure how to do this. :(
Are you trying to figure out what hash function the Hashtable uses? It's probably object.GetHashCode().
Perhaps you have to specify the owner of the modal?
Your other options include inheriting from System.Drawing.Image. Bitmap actually inherits from Image, anyway.
Or you could use what's called extension methods.
...
Yep, set the width of the ColumnHeader objects to -2. For instance:
foreach (ColumnHeader ch in lvList.Columns)
{
ch.Width = -2;
}
Try it!
Beaten by nelo by mere minutes!
You're going to want to take a look at the System.Diagnostics.Process class. Inside of it it has a StartInfo class that you can fill in with executable location data.
Process batFile = new...
Yup, nested classes are instantiated the exact same way as all the others. For instance:
public class Class1
{
public class NestedClass1
{
}
}
@zdavis - I don't see any nested classes in your example. Maybe a misplaced bracket?
@JustSomeGuy - What I'd suggest here is to just try it out. Nested classes in C# are very useful for limiting...
You may want to provide a link to the example you're referring to. We don't know if you're talking about ASP or WinForms. I dunno if it'd matter much, but more context never hurts.
Yep, the simplest way being using a BackgroundWorker. Very easy to use for simple threading. Just remember to wrap any UI changes in the following:
... | http://forums.codeguru.com/search.php?s=460cdf5c82caa02d3d474e72d93cf2bc&searchid=4875381 | CC-MAIN-2014-35 | en | refinedweb |
22 June 2010 09:09 [Source: ICIS news]
SINGAPORE (ICIS news)--Bayer MaterialScience (BMS) has opened a €7m ($8.64m) functional films research centre in Singapore which would focus on high-tech films and nanotechnology for the Asia-Pacific electronics market, the German polymer producer said on Tuesday.
The company said in a statement that the new centre would pursue joint projects with partners to develop functional films that could be applied to flexible screens and three-dimensional (3-D) displays, as well as nanomaterials used in energy-efficient lighting.
"By collaborating with a network of research organisations and business partners in Singapore and the whole of Asia Pacific, we will put to use the innovative ideas to fast-track products for tomorrow’s markets like automotives, electronics and displays," said Marcus Yim, managing director of Bayer southeast Asia.
The research centre, which presently houses about 30 researchers and staff, is the first of its kind in the Asia-Pacific and outside Germany, according to the company.
"The opening of this facility is a clear indication of our continued commitment to ?xml:namespace>
“This confidence is underpinned by the fact that demand continues to grow for new and innovative film-based products. Much of the research and development for these new materials will be undertaken at this new centre,” he added.
Meanwhile, Thomas told ICIS earlier on Monday that the company would make a decision on whether to build a new polycarbonate plant either in Thailand or China by the end of 2010.
( | http://www.icis.com/Articles/2010/06/22/9369878/bayer-materialscience-opens-new-functional-films-research-centre.html | CC-MAIN-2014-35 | en | refinedweb |
Eugene,
About six months ago, I was working on a project that used this component. Specifically, I was using 0.8.2. At the beginning of June, the project was mothballed (and so was the machine). Well, we have a need to bring it back to life. When I fired up the machine, the app would not run (but did load without a problem) because apparently, I had used an evaluation copy of Sql Server 2008 that had expired, so I loaded up Sql Server 2008 R2. I tried loading the project to change the connection string in the web.config and realized that I was also using an evaluation copy of Visual Studio 2010. So, I installed a current version. I modified the web.config copied it to the web site (i did not re-deploy the whole project). I restarted the web site and attempted to access the app and only got a blank page. Ultimately, what I found was that Ext.app.REMOTING_API was undefined (it was failing in the addProvider call). All I can assume is the installing the new components must have changed something.
I really don't want to have to plug in the current version if I don't have to.
Any ideas?
Thanks, Brian.
Brian,
I would suggest you load the ASP.NET MVC source code as a project in your current app, remove the static Ext.Direct.Mvc DLL and point to the one form the source code bin dir. Then you can step into the source of the addProvider call and probably get more info as to why it's failing.
Also, make sure that you have marked the AccountController with the [DirectIgnore] attribute if you have one, or remove it altogether. MS started adding that controller to 2010 projects at some point and the addProvider call blows up when trying to create an Api for it.
HTH
Agreed, the membership provider is kinda narly anyway, I wouldn't use it.
Well, I bit the bullet and decided to try just upgrade to the current version of Ext.Direct.MVC and without upgrading extjs (using version 3.1.1). Had to update all of the controllers because of the change to Direct() and the removal of DirectForm(). I was quite pleased that the app came up and worked without any additional changes.
I am actually using the AccountController and the methods were generated just fine.
Thanks for the suggestions.
You don't use membership at all? How do you do authentication, and working with [Authorize] Attribute?
Because of the [Authorize] attribute I work with the membership provider, or do you have another way to accomplish this?
I created a CustomAuthorize attribute that I use through out the controllers instead of the Authorize attribute. First it calls base.AuthorizeCore() and then it adds a custom header to the response that tells the client the result of the base.AuthorizeCore() call. The override of AuthorizeCore() then returns true.
Code:
public class CustomAuthorizeAttribute : AuthorizeAttribute { protected override bool AuthorizeCore(HttpContextBase httpContext) { bool result = base.AuthorizeCore(httpContext); httpContext.Response.AddHeader("authorizationTimeout", result ? "false" : "true"); return true; } }
Then in the client, I added the following:
Code:
Ext.data.Connection.on('requestcomplete', function (con, response, options) { if (Ext.isFunction(response.getResponseHeader)) { var authTimeout = response.getResponseHeader('authorizationTimeout'); if (authTimeout === 'true' && !Ext.isDefined(Ext.ComponentMgr.get('login-box'))) { app.startLogin(); } } });
I am sure that there are other ways of handling this, but this is how I did it....
Interesting method. But does your controller still give back all the requested data? That would be a security hole, I think?
Congratulations Eugene. The work you have done is very impressive. Thanks!
Your example ran fine but I did get this build warning...
Warning 3 'System.Web.Mvc.ValueProviderDictionary' is obsolete: 'The recommended alternative is to use one of the specific ValueProvider types, such as FormValueProvider.' C:\Users\ttb\Downloads\Ext.Direct.Mvc-2.1.0\Source\Ext.Direct.Mvc\DirectMethodInvoker.cs 44 41 Ext.Direct.Mvc
Hi,
When direct batches calls, do they run in a specific order on the server? In the example below will onSayHello run on the server before onEchoDate runs?
Code:
onBatch: function() { this.onSayHello(); this.onEchoDate(); this.onAddNumbers(); this.onEchoPerson(); },) | http://www.sencha.com/forum/showthread.php?72245-Ext.Direct-for-ASP.NET-MVC/page28 | CC-MAIN-2014-35 | en | refinedweb |
David Reid wrote: >. I'm not goign to opine on possible repository layouts, as there are people who knwo far better than me how that works. However, if you sort it out so you can jsut run python with setuptools, you can keep the directory structure however you like on disc, and then just type "./setup.py develop" for each project you have checked out to do all the funky namespace and path mangling that setuptools does, and work with your working copy. I suppose a utility script that runs the equivalent for the various setup files in the various subprojects would be useful if you always want to work with the bleeding edge, but other than that, it's fairly easygoing. Moof -- Giles Antonio Radford, alias Moof "Too old to be a chicken and too young to be a dirty old man" Serving up my ego over at <> | http://twistedmatrix.com/pipermail/twisted-python/2005-October/011681.html | CC-MAIN-2014-35 | en | refinedweb |
18 May 2009 03:04 [Source: ICIS news]
GUANGZHOU (ICIS news)--China’s polyvinyl chloride (PVC) foam industry needs to adopt higher standards and keep up with technology advancements to become competitive, a top official from the country’s plastic processing association said late on Sunday.
“The ongoing financial crisis has dragged down ?xml:namespace>
“By establishing tighter product standards and creating a platform for exchanging technical know-how, Chinese PVC foam exporters can improve their productivity and competitiveness,” Liao added.
According to a report presented at the summit, product standards for solid PVC foam board had failed to keep up with technological advances. This had created a wide disparity in product quality across the industry.
A similar trend was seen in the rapidly expanding hollow PVC foam board industry, where large manufacturers created their own product standards, the report said.
Liao said the PVC foam market should be encouraged to grow as it was a relatively environment friendly industry. The material was often an ideal substitute for wood and could be manufactured using components of recycled plastic | http://www.icis.com/Articles/2009/05/18/9216863/chinas-pvc-foam-industry-needs-tighter-product-standards.html | CC-MAIN-2014-35 | en | refinedweb |
Reusable React controls for your SharePoint Framework solutions
¶
This repository provides developers with a set of reusable React controls that can be used in SharePoint Framework (SPFx) solutions. The project provides controls for building web parts and extensions.
Attention
In order to migrate to
v2 it is advised to follow this guide: Migrating from V1.
Attention
v2 version of the controls project has a minimal dependency on SharePoint Framework version
1.11.0.
v1 has a minimal dependency on SharePoint Framework version
1.3.0. Be aware that the controls might not work in solutions you're building for SharePoint 2016 with Feature Pack 2 on-premises. As for SharePoint 2016 with Feature Pack 2 version
1.1.0 of the SharePoint framework is the only version that can be used. SharePoint 2019 on-premises uses SharePoint framework
v1.4.0 and therefore should be fine to use with these controls. Since version 1.4.0 the localized resource path will automatically be configured during dependency installation.
Once the package is installed, you will have to configure the resource file of the property controls to be used in your project. You can do this by opening the
config/config.json and adding the following line to the
localizedResources property:
"ControlStrings": "node_modules/@pnp/spfx-controls-react/lib/loc/{locale}.js"
Telemetry¶
All controls gather telemetry to verify the usage. Only the name of the control and related data gets captured.
More information about the service that we are using for this can be found here: PnP Telemetry Proxy.
Since version
1.17.0 it is possible to opt-out of the telemetry by adding the following code to your web part:
import PnPTelemetry from "@pnp/telemetry-js"; ... const telemetry = PnPTelemetry.getInstance(); telemetry.optOut();
Available controls¶
The following controls are currently available:
- Accordion (Control to render an accordion)
- Carousel (Control displays children elements with 'previous/next element' options)
- Charts (makes it easy to integrate Chart.js charts into web part)
- ComboBoxListItemPicker (allows to select one or more items from a list)
- DateTimePicker (DateTime Picker)
- FilePicker (control that allows to browse and select a file from various places)
- FileTypeIcon (Control that shows the icon of a specified file path or application)
- FolderExplorer (Control that allows to browse the folders and sub-folders from a root folder)
- FolderPicker (Control that allows to browse and select a folder)
- GridLayout (control that renders a responsive grid layout for your web parts)
- IconPicker (control that allows to search and select an icon from office-ui-fabric icons)
-)
- TreeView (Tree View)
- WebPartTitle (Customizable web part title control)
Field customizer controls:
Note
If you want to use these controls in your solution, first check out the start guide for these controls: using the field controls.
-) | https://pnp.github.io/sp-dev-fx-controls-react/ | CC-MAIN-2020-50 | en | refinedweb |
import "github.com/rogpeppe/go-internal/lockedfile"
Package lockedfile creates and manipulates files whose contents should only change atomically.
lockedfile.go lockedfile_filelock.go mutex.go
Read opens the named file with a read-lock and returns its contents.
Write opens the named file (creating it with the given permissions if needed), then write-locks it and overwrites it with the given content.
A File is a locked *os.File.
Closing the file releases the lock.
If the program exits while a file is locked, the operating system releases the lock but may not do so promptly: callers must ensure that all locked files are closed before exiting.
Create is like os.Create, but returns a write-locked file.
Edit creates the named file with mode 0666 (before umask), but does not truncate existing contents.
If Edit succeeds, methods on the returned File can be used for I/O. The associated file descriptor has mode O_RDWR and the file is write-locked.
Open is like os.Open, but returns a read-locked file.
OpenFile is like os.OpenFile, but returns a locked file. If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; otherwise, it is read-locked.
Close unlocks and closes the underlying file.
Close may be called multiple times; all calls after the first will return a non-nil.
MutexAt returns a new Mutex with Path set to the given non-empty path.
Lock attempts to lock the Mutex.
If successful, Lock returns a non-nil unlock function: it is provided as a return-value instead of a separate method to remind the caller to check the accompanying error. (See..)
Package lockedfile imports 6 packages (graph) and is imported by 2 packages. Updated 2020-05-12. Refresh now. Tools for package owners. | https://godoc.org/github.com/rogpeppe/go-internal/lockedfile | CC-MAIN-2020-50 | en | refinedweb |
Yes, it is safe and highly reliable to hire dissertation help online. There is a massive advantage when you opt for hiring a tutor or online help for dissertation service. You get a 24/7 customer service option that will be with you until you are satisfied with the work. The experts writing your dissertation will provide you with a thoroughly researched paper that will fetch you nothing less than an A+ grade.
Tag: helper
magento2 – Magento 2 Model file Const get in Helper
Magento 2 Model file Const get in Helper.
I want to this type :-
File Path :- GetSomeMojoCategoryLandingPageModelEntityAttributeSource
<?php namespace GetSomeMojoCategoryLandingPageModelEntityAttributeSource; use MagentoFrameworkOptionArrayInterface; class Landingpageproducts extends MagentoEavModelEntityAttributeSourceBoolean implements ArrayInterface { const VALUE_NO = 'lpage_no'; const VALUE_NEW = 'lpage_new'; const VALUE_FEATURED = 'lpage_featured'; const VALUE_SALE = 'lpage_sale'; protected $_options; public function getAllOptions() { return ( ('value' => self::VALUE_NO, 'label' => __('No')), ('value' => self::VALUE_NEW, 'label' => __('New Products')), ('value' => self::VALUE_FEATURED, 'label' => __('Featured Products')), ('value' => self::VALUE_SALE, 'label' => __('Sale Products')) ); } ?>
Above const value get in Helper file.
So Please Help me how to get const in helper.
THANKS.
magento2 – Override a helper using Plugin
I have been trying to use plugin to override
MagentoSalesHelperReorder.php but I am not sure how and have been stuck with this problem. Basically I am trying to override the function in this helper file. I have added a comment inside the code what I would like to change.
/** * Check is it possible to reorder * * @param int $orderId * @return bool */ public function canReorder($orderId) { $order = $this->orderRepository->get($orderId); if (!$this->isAllowed($order->getStore())) { return false; } $currentOrder = $this->registry->registry('current_order'); if ($this->customerSession->isLoggedIn() || isset($currentOrder)) { \WHAT I AM TRYING TO DO - canReorder() change to canReorderIgnoreSalable() return $order->canReorderIgnoreSalable(); } else { return false; } }
How can I do this through plugin?
list manipulation – KenKen Puzzle Helper – Dropping order-less sequences
In the following example, I am generating all the variants of a $9 times 9$ KenKen puzzle that come in groups of three using addition that result in $18$.
data = Select(Tuples(Range(9), 3), Plus @@ # == 18 &)
This generates
${{1,8,9},{1,9,8},{2,7,9},{2,8,8},{2,9,7},{3,6,9},{3,7,8},{3,8,7},{3,9,6},{4,5,9},{4,6,8},{4,7,7},{4,8,6},{4,9,5},{5,4,9},{5,5,8},{5,6,7},{5,7,6},{5,8,5},{5,9,4},{6,3,9},{6,4,8},{6,5,7},{6,6,6},{6,7,5},{6,8,4},{6,9,3},{7,2,9},{7,3,8},{7,4,7},{7,5,6},{7,6,5},{7,7,4},{7,8,3},{7,9,2},{8,1,9},{8,2,8},{8,3,7},{8,4,6},{8,5,5},{8,6,4},{8,7,3},{8,8,2},{8,9,1},{9,1,8},{9,2,7},{9,3,6},{9,4,5},{9,5,4},{9,6,3},{9,7,2},{9,8,1}}$
I can then do something to search for repeated cases without order
Cases(data, {OrderlessPatternSequence(1, 8, 9)})
This generates (I want to delete all those after $198$ from data, but to do it for each unique set of three digits).
$${{1,8,9},{1,9,8},{8,1,9},{8,9,1},{9,1,8},{9,8,1}}$$
This approach has two drawbacks, I had to know the sequence to test for, then I can use that to drop all the repeats from data. I would have to repeat this for the next unique sequence.
Is there a simple way to create
data2 = some_fancy_command(data)
It produces data2 (note – I don’t care about the commas either), which only has unique 3-digit numbers regardless of order
$${{189},{279},{288}},{369},{378}}… $$
What is the easiest way to do that?
Note that I am familiar with, but only want a helper as opposed to a solver.
Aside: My goal is to have a tool that effectively duplicates as maybe a CDF of just an MMA Notebook where I enter all the cages, their type, the size of the puzzle and it provides hints on all the numbers that can go into the cage.
magento2 – Get userid of the admin logged in magento 2 helper
Below is a helper with a dependency and you can see within one of its function how the backend user id is read
<?php namespace MbsBackendScreenHelper; use MagentoFrameworkAppHelperAbstractHelper; use MagentoFrameworkAppHelperContext; class AdminUserReaderHelper extends AbstractHelper { /** * @var MagentoBackendModelAuthSession */ private $authSession; public function __construct( Context $context, MagentoBackendModelAuthSession $authSession ) { parent::__construct($context); $this->authSession = $authSession; } public function doSomethingWithMyAdminUserId() { ... $adminUserId = $this->authSession->getUser()->getId(); ... } }
dependency injection – How to solve the dilemma of helper functions relying on an object?
My issue can be concisely described as: is there any way (in PHP, or a pattern) to force default parameters at the time a class is called with the keyword
use so that people can just…well, use it, instead of having to deal with setup?
I usually have my functions that don’t really depend on much and don’t have side-effects inside a
Wrappers class.
Inside that
Wrapper class, I found myself to be needing an object, a dependency. Now, of course, I have a service provider that I can retrieve it from. It looks like this:
use Services; class Wrappers { public function helpMeDoSomething() { //an object implementing MyServiceInterface $service_i_need = Services::get( 'name' ); //do something with the service. } }
But, of course, that’s heavily problematic. Even if I do very granular & proper checking on
$service_i_need, I’m still hiding a dependency. When someone looks at the function, it’s not directly clear that it relies on a
MyServiceInterface and so, given I was out of ideas, I simply decided to go up one level and make the
Wrappers depend on the
MyServiceInterface, then wrap it all inside a service itself, so:
class Wrappers { public function __construct( MyServiceInterface $service_i_need ) { $this->service_i_need = $service_i_need; } public function helpMeDoSomething() { //We can use $this->service_i_need here! } }
Great! Now I just register my service:
Services::register( 'wrappers', new Wrappers( new ServiceINeed ) ) and all’s good.
Well, except now my
Wrappers class needs instantiation, which means I can’t
use Wrappers; at the top of my document, so, as a developer that wants to use
Wrappers, I’d now have to do:
use Services; $wrappers = Services::get( 'wrappers' ); $wrappers->helpMeDoSomething();
To me,
use is an absolute (good) word: **it shows whoever’s reading the document that it’s a dependency that’s needed 100% of the time and it doesn’t need any form of instantiation or setup.
You see, my
Services package can be brought into a document with
use Services and up until the point, we’re roughly on the same page as doing
use Wrappers, however, the problem with this is that my
Wrappers are supposed to only be no-side-effects functions that you can use on your data, whereas a service is an object that you should definitely type-hint, so, the expected outcome of
use Services is wildly different from
use Wrappers.
I’m stuck.
All I want is to globally expose a default implementation of a class such that its functions can be used statically.
In short, I want to initialize
Wrappers on my own as the creator of this package, then for others to just do
use Wrappers; and then for them to use
Wrappers::helpMeDoSomething()…and as I implemented every solution, although basic, it dawned on me: whatever uses
Wrappers::helpMeDoSomething() also indirectly depends on the service that
Wrappers depends on.
Basically, the “blindness” level of whoever’s using
Wrappers is as deep as the function call tree – if you’re calling my
helpMeDoSomething() inside function
A, then
A gets called inside
B, then
B inside
C, you are depending on an
MyServiceInterface object…you just don’t know it or it’s very deeply hidden.
How to solve this?
How to merge two descending singly linkedlist into one ascending linkedlist using Recursion without any helper method?
Here is my solution:
I want to know if my solution fits the requirement 100%? Or if there is any better solution?
Constraint of the question:
must be singly linkedlist
must use recursion, and no helper methods allowed.
The method must return the head node of the merged list.
class Solution{ Node dummy=new Node(0,null);// the dummy node to store the merged result public Node mergeAscend(Node a,Node b){ if(a==null&&b==null){//base case return null; } else{ if((a!=null&&b==null)||a.value>=b.value){// insert "b" after dummy //store the next node of current a, before pointing a.next to dummy.next; Node store_a_next_node=a.next; //insert Node "a" between dummy and dummy.next a.next=dummy.next; dummy.next=a; mergeAscend(store_a_next_node,b); } else if((a==null&&b!=null)||a.value<b.value){//insert "a" after dummy Node store_b_next_node=b.next; b.next=dummy.next; dummy.next=b; mergeAscend(a,store_b_next_node); } } return dummy.next; }
}
Need Helper for FunHomeBiz.com
Post edited: 6/14/20
context sensitive help – What is the name for a pattern featuring helper text in a semi-transparent overlay?
I am seeing an emerging design pattern in web apps that is used for helping new users get oriented to a page or application.
It consists of showing a diagram with succinct helper-text over a semi-transparent overlay, sometimes with arrows pointing to specific controls on the page. One of the best example of this I have seen is in UX Pin, an online wireframing/design tool.
Has anyone ever utilized this pattern – and if so, what is it called? Or how did you refer to it?
I am also interested in learning how it is accomplished. Is there a tool or plug-in that might be useful for achieving this effect, and is it possible to do this in a reusable fashion without placing static text in a transparent png?
magento2 – How to Use Helper Function in checkout_cart_index.xml?
I’ve to override checkout_cart_index.xml file from vendor/Magento for removing estimate shipping and discount code block!
It’s working successfully but my requirement is…it only should run when an admin or user select Yes from config part.I’ve tried ifconfig part but it’s not working. Can anyone suggest any better idea to fulfil this requirement!? Here’s my code of checkout_cart_index.xml. In this code set true in checkout.cart.coupon part is for removing Discount code and the above part is for for removing estimated shipping block and also override shipping.phtml
<action method="setTemplate"> <argument name="template" xsi:vender_extensionname::shipping.phtml</argument> </action> <arguments> <argument name="jsLayout" xsi: <item name="components" xsi: <item name="block-summary" xsi: <item name="config" xsi: <item name="componentDisabled" xsi:true</item> </item> </item> </item> </argument> </arguments> </referenceBlock> <referenceBlock name="checkout.cart.coupon" remove="true"/>
Here’s my config part
in which if user set to yes in discount code then it should be displayed but if one select NO then it should not be displayed.I’ve created helper data.php for fetching that yes no value but I’m confused in how to use helper function in my XML part. | https://proxies-free.com/tag/helper/ | CC-MAIN-2020-50 | en | refinedweb |
I will create an animation for birthday to send to your family friend or whoever you want to for a fair price
by: KirilMitev
Created: —
Category: Art & Design
Viewed: 193
.
Proxies-free.com: 100% Free Daily Proxy Lists Every Day!
Get Free Proxies Every Day
I will create an animation for birthday to send to your family friend or whoever you want to for a fair price
by: KirilMitev
Created: —
Category: Art & Design
Viewed: 193
.
I have my google calendar Bday and want to replicate from 2020 to 2021 without typing all of them again
In the Birthday problem, the formulas
$${displaystyle {begin{aligned}p(n;d)&={begin{cases}1-displaystyle prod _{k=1}^{n-1}left(1-{frac {k}{d}}right)&nleq d\1&n>dend{cases}}&approx 1-e^{-{frac {n(n-1)}{2d}}}&approx 1-left({frac {d-1}{d}}right)^{frac {n(n-1)}{2}}end{aligned}}}$$
work well for $d = 365$ and $n=23$, and gives the usual estimation that if you have 23 people in the same room, the probability to have at least two people born the same day is $geq 50 %$.
Question: what formula is available for $p(n; d)$ with more precise error terms?
Concrete application:
I’m using random 5-alphanumeric-character identifiers for an inventory of objects.
Example: V4QH7, WYJ9X, LK6H4, etc.
If I have $n = 10,000$ objects, what is the probability that at least 2 objects have the same ID?
Note: the last formula (the one after the one with exponential function above) gives
Error, numeric exception: overflow in Maple when I take $d=(26+10)^5=60,466,176$ and $n=10,000$.
The formula with exp gives $p approx 3.8 %$ but since no error term is given, I don’t know if this is accurate.
Today, 31 October 2020, being the monthly close, it is mandatory for the asset to maintain above $13900 or cross $14000 to increase the bull run pace. Read more
.
Suppose I want only a few of my friends to get notified. What should I do?
I am trying to build birthday and anniversary webpart in office 365. The issue is our local ad users are synced but the birthday and hire-date are not synced in office 365.
I was checking the issue and found that we were using Azure AD Connect and that is not supporting these columns. So I have created 2 custom attribute (birthdate & hiredate) in Azure directory. Then using directory extension attribute sync, I am mapping local ad column in azure ad.
Now I am stuck here. I don’t know how can I map these custom attribute with Office 365 columns. Can anyone provide any link how can I do these?
Join me in wishing @Joshua Farrell a wonderful 32nd birthday.
So this is a pretty simple code, I think it was?
It asks for the user’s birth day and month, and gives it back with the day a discount reminder email will be sent (the day before their birthday)
Now I tried to optimize as much as I could, possible wrong inputs and if the user’s birthday is the first of a month
Even though I’m still pretty new to coding, I still want you to criticize my code as much as I could, I would like to improve as much as I can
using System.Text.RegularExpressions; using System.Linq; namespace Exercice14 { class Program { static void Main(string() _) { // declaring some variables int birthDay; int reminderDay; string suffix = "th"; string reminderSuffix = "th"; string birthDayT; string birthMonth; string reminderMonth; string() months = {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" }; bool exceptionFirst = false; // prompts for birth month and capitalize first letter Console.WriteLine("Hello User!"); Console.Write("Please enter your birth month in letters: "); birthMonth = Console.ReadLine(); birthMonth = char.ToUpper(birthMonth(0)) + birthMonth.Substring(1); // check if birth month contains only letters while(Regex.IsMatch(birthMonth, @"^(a-zA-Z)+$") == false) { Console.WriteLine("Birth month should only contain letters!"); Console.Write("Please enter your birth month in letters: "); birthMonth = Console.ReadLine(); birthMonth = char.ToUpper(birthMonth(0)) + birthMonth.Substring(1); } // check if month is right while (months.Contains(birthMonth) == false) { Console.WriteLine("Invalid month?! Please enter a valid english month"); Console.Write("Please enter your birth month: "); birthMonth = Console.ReadLine(); birthMonth = char.ToUpper(birthMonth(0)) + birthMonth.Substring(1); } // prompts for birth day Console.Write("Please enter your birth day in numbers: "); birthDayT = Console.ReadLine(); // check for valid day while (int.TryParse(birthDayT, out int _) == false) { Console.WriteLine("Invalid argument! Please enter day in numerals"); Console.Write("Please enter your birth day in numbers: "); birthDayT = Console.ReadLine(); } // check for valid day number while (int.Parse(birthDayT) < 1 || int.Parse(birthDayT) > 31) { Console.WriteLine("Invalid date! 
Please enter a day between 1 and 31"); Console.Write("Please enter birth day in numbers: "); birthDayT = Console.ReadLine(); } // assign birth day to variable once tested birthDay = int.Parse(birthDayT); // set reminder day and month reminderDay = birthDay - 1; reminderMonth = birthMonth; // check which suffix to use for days AND calculate reminder day and month if exception if (birthDay == 1) //exception { exceptionFirst = true; suffix = "st"; reminderMonth = months(Array.IndexOf(months, birthMonth) - 1); } if (birthDay == 2) { suffix = "nd"; reminderSuffix = "st"; reminderDay = 1; } if (birthDay == 3) { suffix = "th"; reminderSuffix = "nd"; } if (birthDay > 3) { suffix = "th"; reminderSuffix = "th"; } // print values Console.WriteLine(); Console.WriteLine("Yer birthday is on the " + birthDay + suffix + " of " + birthMonth ); if (exceptionFirst == true) { Console.WriteLine("A reminder email for your birthday discount " + "nwill be sent on the last day of " + reminderMonth); } else { Console.WriteLine("A reminder email for your birthday discount " + "nwill be sent on the " + reminderDay + reminderSuffix + " of " + reminderMonth); } } } }
I need someone to help me with emu8086
Q1: Write a program that includes:
1- Ask for your name
2- Ask about your birthday
3- Enter your phone number
4- Ask for your address
The program then displays this information about you as follows:
Name Date of birth Telephone number Address
——– ———— —————— ———–
Q2: You have the following string:
"Aaabjhjhjkhaaaboioewiujdasdaaabkjlaabaopioioaaabaaaaaab"
Write a program that determines how often the pattern "aaab" appears in the string.
Display the specified string at (5,10).
Display the pattern you are looking for under (6,10).
Time of the pattern Appearance of the pattern at (7.10) | https://proxies-free.com/tag/birthday/ | CC-MAIN-2020-50 | en | refinedweb |
Trajectories
Trajectories are useful for storing the positions of the atoms in a long molecular dynamics run or in a structural optimization run. Here is a typical use case:
>>> from ASE.Trajectories.NetCDFTrajectory import NetCDFTrajectory >>> from ASE.Dynamics.VelocityVerlet import VelocityVerlet >>> from ASE.Calculators.PairPotential import PairPotential >>> from ASE.IO.NetCDF import ReadNetCDF >>> water = ReadNetCDF('water.nc') >>> water.SetCalculator(PairPotential()) >>> dyn = VelocityVerlet(water, dt=0.05) >>> traj = NetCDFTrajectory('traj.nc', water, interval=5) >>> dyn.Attach(traj) >>> dyn.Run(steps=100)
In this example, we are doing molecular dynamics using the Velocity Verlet algorithm. The line
>>> traj = NetCDFTrajectory('traj.nc', water, interval=5)
creates a trajectory object. The configurations are written to a netCDF file ('traj.nc').
The dyn.Attach(traj) command tells the dynamics object to notify the trajectory object after every move of the atoms (the trajectory is an observer, and the dynamics is a subject - see the observer pattern). The trajectory object was told to only write every fifth configuration to file (the interval=5 argument).
The dynamics object will call the Update() method of the trajectory - this method can also be called manually.
Opening a netCDF trajectory file
If no ListOfAtoms object is supplied in the NetCDFTrajectory constructor, the netCDF file is opened in read mode. If a ListOfAtoms object is pressent, write mode is used, and a new file is created (an existing file will be renamed to have a .bak extension). There is an optional mode argument with two possible values: 'new' will try to create a new netCDF file and report an error fi the file exists, and 'append' will append to an existing file.
Getting information out of a trajectory
For getting information out of a trajectory, the following methods can be used:
- SetFrameNumber(frame):
- Set the current frame number.
- GetFrameNumber():
- Get the current frame number.
- GetListOfAtoms(frame=None):
- Create a new ListOfAtoms object for configuration number frame (defaults to the current frame number).
- Get(name, frame=None):
- Get the named data for configuration number frame (defaults to the current frame number).
Examples:
>>> diff = (traj.Get('CartesianPositions', -1) - # last ... traj.Get('CartesianPositions', 0)) # first >>> traj.Get('AtomicNumbers') array([8, 1, 1]) >>> water2 = traj.GetListOfAtoms()
A netCDF trajectory can be opened in read only mode like this:
>>> traj = NetCDFTrajectory('traj.nc')
By default, the following quantities are allways put in a netCDF trajectory: 'AtomicNumbers', 'CartesianPositions', 'UnitCell' and 'Tags'. Those of 'PotentialEnergy', 'CartesianForces' and 'Stress' that are available are also put in. The tags and the atomic numbers are written once only. The default behavior can be changed with the Add(name) and Delete(name) methods:
>>> t = NetCDFTrajectory('traj.nc', water) >>> t.Delete('CartesianForces') >>> t.Add('UnitCell', once=True)
This trajectory will have no forces and the unit cell is only written once.
Note
The length and energy units used in the Python session that ceated a netCDF trajectory will always be included in the file. When using a trajectory in a Python session with different units, conversion will take place automatically.
Conversion has not been tested yet!
Adding user defined data
The Add(name, ...) method can be used to add almost anything to a trajectory. The add method has the following optional arguments:
- data:
- A callable returning the data in a numarray (or the actual data, in case the data does not change)
- shape:
- The shape of the numarray. Use the string 'natoms' for the number of atoms.
- typecode:
- The typecode for the numarray (num.Int, num.Float, ...).
- once:
- A boolean deciding whether the data should be written once or for every update.
- units:
- A tuple of two integers...
For the missing optional arguments, the Add() method will try to make good guesses for default values:
- If name is one of the built-in quantities of the ListOfAtoms object, then everything is known.
- If data is given as a numarray or a callable returning a numarray, then the shape and the typecode can be extracted. However, this will call the callable - this can be avoided by supplying the shape and typecode.
- once will default to False - write every time.
- units will default to (0, 0) - a pure number.
Suppose you want to include one integer for each atom, once:
>>> data = num.array([1, 2, 3]) >>> t = NetCDFTrajectory('traj.nc', water) >>> t.Add('Stuff', data, once=True)
Or if you have a function that calculates something from a molecule (something = calculate_something(water)):
>>> def data(): ... return calculate_something(water) ... >>> t.Add('Something', data)
Note
Float32 <--> Float64 does not work!
The plottrajectory tool
plottrajectory will open a rasmol window for wieving the atomic structure, a Gnuplot window for showing xy data and a Tk window for changing the current frame number. By default the Gnuplot window will show the total energy as a functions of the trajectory frame number.
The plottrajectory tool is called from the command line and the syntax is:
plottrajectory [-r R1 R2 R3] [-u usermodule] [-p "datamethods"] trajectory
options
By using the -r option you can repeat the atoms R<x> times the <x>-axis.
By using the -p option you can plot xy data using the gnuplot window, default being (frame number,total energy) plot. The syntax is:
-p "[list,list,..]"
list will here correspond to a separate gnuplot window. Each list can have one or more elements that will be added to the gnuplot window, some examples:
By using the -u <usermodule> option you can import methods defined in your own module, the method must be functions or callable classes, taking a listofatoms object as argument.
An example could be the file distance.py calculating the distance between two atoms:
import Numeric as num


class Distance:
    """Returns the distance between atom number ``a`` and ``b``."""

    def __init__(self, a, b):
        # Indices of the two atoms whose separation is measured.
        self.a = a
        self.b = b

    def __call__(self, atoms):
        # Euclidean distance between the two Cartesian positions.
        pos = atoms.GetCartesianPositions()
        d = pos[self.a] - pos[self.b]
        return num.sqrt(num.dot(d, d))

    def __repr__(self):
        # Fixed typo in the original message ("bewteen").
        return 'Distance between atom %d and %d' % \
               (self.a, self.b)
Now you can test the method using:
plottrajectory -p "[[Distance(0,1)]]" -u distance.py mytraj.nc
If you think that your method could be of use to others, please consider sending it to the mailing list, and we will add it to the utilities already defined.
At the moment the following methods are defined:
- Geometry:
-
- Position(atom-number,coordinate): correspond to the ListOfAtoms
- method atoms.GetCartesianPositions()[atom-number, coordinate]
- Force(atom-number,coordinate): correspond to the ListOfAtoms
- method atoms.GetCartesianForces()[atom-number,coordinate]
MaxForce: The magnitude of the largest force acting on any atom.
Volume: Volume of the unitcell.
Distance(atom1,atom2): The distance between atom1 and atom2.
- Molecular-Dynamics:
-
Temperature: The temperature of the atoms.
PotentialEnergy: The total potential energy of the atoms.
So if you want to plot the Total energy as a function of some distance between two atoms you can use:
or if you want to plot the temperature and total energy in two plots you can do: | https://wiki.fysik.dtu.dk/ase2/Trajectories?action=diff&rev1=5&rev2=6 | CC-MAIN-2020-50 | en | refinedweb |
Raw, unedited, insightful and deep content from our internal WPF discussion…
Subject: Any chance of INotifyCollectionChanged moving to the core .NET libraries?
The System.Collections.Specialized.INotifyCollectionChanged interface is defined in WindowsBase.dll (primarily a WPF assembly) but my customer believes it should be pushed down into the core BCL so that other project types can benefit from it without requiring a reference to WPF/WindowsBase.
Is this planned for 4.0 or later?
Answer:
INotifyCollectionChanged, ObservableCollection<T>, and ReadOnlyObservableCollection<T> have been type forwarded into System.dll for .NET 4.
Subject: Why TextRange.Load(MemoryStream, DataFormats.Xaml) can not work?
I am trying to load a flowdocument from a Xaml file using TextRange.Load(MemoryStream, DataFormats.Xaml) method, but the following code does not work:
// Attempt to load loose XAML into a FlowDocument via TextRange.Load.
// NOTE(review): this does NOT work for arbitrary XAML files --
// DataFormats.Xaml is only understood by TextRange.Load when the stream
// was produced by TextRange.Save; use XamlReader.Load for loose XAML
// (see the answer below this snippet).
FlowDocument f = new FlowDocument();
// Range spanning the (empty) document's entire content.
TextRange range = new TextRange(f.ContentStart, f.ContentEnd);
// Read the XAML file as text (sr is never disposed -- consider 'using').
StreamReader sr = new StreamReader(@"TextFile1.xaml");
string aa = sr.ReadToEnd();
// Re-encode the text into an in-memory stream for TextRange.Load.
MemoryStream ms = new MemoryStream(System.Text.Encoding.Default.GetBytes(aa));
range.Load(ms, System.Windows.DataFormats.Xaml);
I know if I use XamlReader.Load method will do the trick, but I don’t understand why the above code can’t work. Is this a bug or I am missing something?
Answer:
DataFormats.Xaml (and XamlPackage) are misleading – when consumed by TextRange.Load they only work on a subset of xaml generated by TextRange.Save. The TextRange.Save/Load api is intended for editing scenarios where you need to persist arbitrary selections that may cross element boundaries (e.g., you need to save half of a Paragraph).
For your scenario, where you’re loading loose xaml from a file, XamlReader is the way to go.
Subject: What is "Unlabeled time" in Visual Profiler that is part of Performance Profiling Tools for WPF?
Answer:
Any CPU time that we aren’t able to attribute to a specific WPF operation goes in the unlabeled bucket. This includes time spent in CLR operations such as garbage collection and time spent in the application’s own code.
Subject: Accessibility issue with WPF DataGrid and NewItemPlaceholder
We’ve run into an accessibility issue with the WPF DataGrid concerning the NewItemPlaceholder. Specifically, we’re re-templating the placeholder control in order to add a “real” click-here-to-add-a-new-item button. However, none of the UI elements in the new template are available to accessibility-based automation (i.e. the button does not show up in UISpy or AccExplorer). The template is set during the DataGrid LoadingRow event and looks like this:
<ControlTemplate x:
<SelectiveScrollingGrid>
<Button Content="Click here to add a new item." Margin="5,5,0,0" x:
</SelectiveScrollingGrid>
</ControlTemplate>
Answer:
Yeah the AutomationPeers for all of the DataGrid components are structured around rows and cells. In your scenario you are retemplating a row (NewItemPlaceHolder row) so it does not have cells. Unfortunately the AutomationPeers do not recognize this. So you will in fact need to jump through some hoops to enable this scenario.
- Subclass DataGridRowAutomationPeer and override GetChildrenCore to special case NewItemPlaceHolder row and return the visual children instead of the cells.
- Subclass DataGridRow and override OnCreateAutomationPeer to return the subclassed DataGridRowAutomationPeer.
- Subclass DataGrid and override GetContainerForItemOverride to return the subclassed DataGridRow.
Subject: Adding ContextMenu to DataGrid header
I am using WPF 4.0 DataGrid and want to add context menu only to the header. The way that I found to do is to add a ContextMenu setter property to a style and assign the style to the DataGrid.ColumnHeaderStyle.
Answer: This is the right way of setting the context menu on DataGridColumnHeader (either using an implicit style for DataGridColumnHeader or setting it explicitly using ColumnHeaderStyle property).
But this puts restriction that a style needs to be added to the ColumnHeader. Performing SetValue on “DataGridColumnHeader.ContextMenuProperty” somehow sets it for the entire grid. The context menu should only be for the header.
Answer: This won’t work. When one uses ‘DataGridColumnHeader.ContextMenu’ property, note that one is using the property which internally happens to be the same for DataGrid too (because ContextMenu is defined on FrameworkElement and all controls share the same property definition). Hence that would end up setting the value of DataGrid itself.
Is there any way to add context menu only to the header w/o having to set ColumnHeaderStyle?
Answer: Use implicit style for DataGridColumnHeader. But honestly I don’t see any difference from using ColumnHeaderStyle.
Subject: Adding HierarchicalDataTemplate via UI?
How can I add a HierarchicalDataTemplate for Silverlight 3 via the Blend 3 UI (without typing in the class in XAML)?
Answer:
Steps:
1. Create a new sample data
2. Click on the triangle of “add simple property”, select “convert to Hierarchical Collection”
3. Drag the converted Hierarchical Collection to artboard
View the xaml, notice the Blend generated HierarchicalDataTemplate is bound to the generated tree view.
Here is a more complete TreeView editing experience write-up:
Subject: debugging an unmanaged exception in WPF app.
When I tried to debug my application I saw the following message: "The debugger does not support debugging managed and native code at the same time on this platform."
How could I enable only native code debugger without managed one?
Answer:
There may be other ways, but this should work.
Right-Click on the solution, select “Add Existing Project” and select the compiled exe.
Set the exe project as the startup project.
Right-Click, go to Properties and set “Debugger Type” to “Native Only”
Subject: API that can determine whether a keystroke yields printable characters or not
I have a KeyDown event and System.Windows.Input.KeyEventArgs event arg e.
Is there any API that I can use to determine whether this e.Key is printable or not? Or is there any API I can get the character of this keystroke?
Answer:
Generally, no. KeyDown to text conversion is pretty stateful, and you don’t have access to this state. Most of it is maintained by the OS.
Subject: XBAP and multi touch in Windows7
Will all multi touch features in WPF 4 be available when running on Windows 7 as XBAPs as well?
Answer:
Yes, manipulation and touch events are available in PT. However you cannot implement your own touch device.
Subject: X:Name and Name property
I have a quick question on why do we have Name for some of the Classes and have to use x:Name for the classes related to Animation/Timelines. Why is this difference in the framework and no uniformity?
Answer:
XAML uses [RuntimeNameProperty(“somePropName”)] on classes to signify when a property is the “Name” property (and thus aliased with x:Name).
XAML treats the two interchangeably when a type does have a RuntimeName declared.
If a tag has both, that is an error.
Reasons for where we ended up:
· We wanted WPF to not have to require 2 namespaces to have a Named object.
· We didn’t want to hard code all “Name” properties as the Name.
Subject: WPF colour depth rendering support
Hi all,
I'm working on an application that displays DICOM images, the spec allows for images to have greater than 8 bits per channel which most if not all of my test data does have and is very common in medical images. I read on that "The WPF rasterizer only natively renders bitmaps in Bgr32 and PBgra32 formats" (In the 'WIC in WPF' section, 5th paragraph), is there any way to have a WPF application render an image in PixelFormats.Prgba64 or PixelFormats.Gray16 (or an equivalent)?
Thanks,
Answer:
No. As Dwayne’s blog notes, the WPF rendering pipeline operates in 32-bit only, so all images are format converted to that before being processed.
Subject: Workaround for IsDeferredScrollingEnabled bug
When using a ComboBox inside of a ScrollViewer with IsDeferredScrollingEnabled=true scrolling in the ComboBox does not work correctly.
Answer:
You’ve certainly hit a bug and we are recording one for this scenario. As for a workaround, you can set IsDeferredScrollingEnabled to true for the ComboBox as well to get all the scrollviewers to work correctly. Like this.
<StackPanel.Resources>
<!-- Workaround for the IsDeferredScrollingEnabled bug: give every
     ComboBox deferred scrolling too, so its internal ScrollViewer
     behaves consistently with the outer deferred-scrolling viewer. -->
<Style TargetType="ComboBox">
<Setter Property="ScrollViewer.IsDeferredScrollingEnabled" Value="true" />
</Style>
</StackPanel.Resources>
Subject: WindowStyle.None + ResizeMode.CanResize vs. NoResize
I’m trying to recreate a window like the volume window, where there’s minimal chrome and cannot be resized.
WindowStyle.None renders a window without a title bar. ResizeMode.NoResize will make a window not resize (surprise, surprise). Unfortunately, when you use them both together, there is no window chrome at all. I’ve tried setting the min/max height/width, which works, but I still get the resize mouse icon when I hover over the window border. Is it possible to render a minimal chrome without the resize icon?
Answer:
I think you should be able to use what you have and p/invoke to set the Window's native style to include WS_BORDER.
There isn't a combination in WPF's Window that supports what you're trying to express.
Subject: Window chrome size
Is there a way to get the height of the title bar and width of little border on the left in a window. I’m trying to create an overlay window via databinding (would use popup, but it doesn’t seem to respect bindings like Window does) so I need to add in the chrome size to find the global position of the window’s client area. Or maybe there is an easier way?
Answer:
You might try Visual.PointToScreen to see if it does what you want. I think it gives coordinates in actual device pixels, so if you need to position your overlay using WPF's 1/96" units, you'll have to multiply by 96/systemDPI. With some effort, you can get this scale factor from HwndTarget.TransformFromDevice.
Subject: RE: Viewing WPF applications over Citrix
What would be the implications of running WPF over Citrix.
Answer:
All versions of WPF since WPF 3.5 SP1 have remoted (both with Remote Desktop and Terminal Server) using Bitmap Remoting.
Bitmap remoting works as follows:
- The application is rendered on the server using WPF’s software rasterizer
- As the application runs, the server keeps track of which regions of the application’s window are newly dirty and need to be updated
- When a region needs to be updated, the server creates a compressed bitmap of just the dirty region and sends that to the client
- Once the client has drawn that bitmap to the appropriate place on its own window, the client window is up-to-date
Given how this remoting mechanism works, performance can be maximized in several ways:
- Dirty regions should be kept as small as possible so the least amount of data is sent over the wire
- Ambient animations should be turned off.
- For instance, setting a window background to an animating gradient would cause the entire window to be invalidated / redrawn every frame
- The system does not optimize away occluded parts of the application
- For instance, an animation that is completely hidden behind some other opaque element will still cause dirty region invalidation / bitmap generation to occur. Remove these from your application.
- Dirty regions should be created as infrequently as possible
- Turn off as many animations as possible
- For those animations that can’t be eliminated completely, lower the animation framerate using the DesiredFramerate property
- Dirty Region Bitmaps should be as simple as possible to maximize their compression
- Application running over TS should favor solid colors over gradients or other exotic fills (unnecessary images, etc), especially for application pieces that will be redrawn frequently
- Avoid operations that are especially slow when rendered in software
- BitmapEffects / Effects / ShaderEffects, especially blurs and drop shadows with large radii, are quite slow in software
- 3D – the 3D software rasterizer is substantially slower than rendering in hardware
Subject: Spell checker does not pick up the current input language if it changes
If I create an empty application, with:
<TextBox SpellCheck.IsEnabled="True" />
And I enter text in English, all is good. If I delete all my text in that textbox, change my input language to German (for example), and input German words, it will still be spell checking in English. If I restart the app with my input language set to German, it will spell check correctly.
Answer:
It’s by design. TextBox doesn’t track input language (unlike RichTextBox, which will tag every character entered with the current input language). TextBox can’t track the input language of individual characters because it contains nothing but Unicode. So,
1. If TextBox.Language is set, the speller will respect it.
2. If TextBox.Language is unset/default (en-us), the speller uses a heuristic, reading the input language set when spelling is enabled.
Language is an inheritable property, so setting it on any parent element will affect a TextBox. The speller will honor any Language value that is not default -- local, inherited, or styled.
Subject: turn off bitmap effects?
Is there a registry setting to turn off bitmap effects?
Answer:
No there isn’t. But in 4.0 all the *BitmapEffect classes are either no-op'd or redirected to their *Effect equivalents (for the case of DropShadow and Blur).
Subject: RadioButton/ToggleButtons with DataBinding
Does anyone have an example of a group of RadioButtons or ToggleButtons working with DataBinding?
We've found it quite difficult to get this working in a straightforward way.
Answer:
Ralph, yes there are known issues about RadioButtons and other Controls setting local values upon user interaction and hence clobbering app set Bindings in .Net 3.5
I've worked around this issue in .NET 3.5 SP1. Here's how I data bind a group of radio buttons to an enum-valued property:
<!-- Radio-button group two-way bound to a single enum-valued property
     (PublishTarget).  Each IsChecked binding goes through an equality
     converter whose ConverterParameter is the enum value that button
     represents.  Each button gets a UNIQUE GroupName so WPF's built-in
     group handling does not set a local value and clobber the bindings
     when the user clicks (.NET 3.5 SP1 workaround); mutual exclusion
     comes from the shared data source, which must implement
     INotifyPropertyChanged so the other buttons update. -->
<StackPanel>
<RadioButton Content="New folder"
IsChecked="{Binding Path=PublishTarget,
Converter={StaticResource equalityConverter},
ConverterParameter={x:Static local:PublishTarget.NewServerFolder},
Mode=TwoWay}"
GroupName="1"/>
<RadioButton Content="Existing folder"
IsChecked="{Binding Path=PublishTarget,
Converter={StaticResource equalityConverter},
ConverterParameter={x:Static local:PublishTarget.ExistingServerFolder},
Mode=TwoWay}"
GroupName="2"/>
<RadioButton Content="Local folder"
IsChecked="{Binding Path=PublishTarget,
Converter={StaticResource equalityConverter},
ConverterParameter={x:Static local:PublishTarget.LocalFolder},
Mode=TwoWay}"
GroupName="3"/>
</StackPanel>
Setting each radio button's GroupName to a unique value prevents the bindings from getting clobbered when the user clicks on a radio button. Here I'm relying on the data source to implement INotifyPropertyChanged, which will tell the other radio buttons to update. A similar approach should work for radio buttons in an ItemsControl. | https://blogs.msdn.microsoft.com/jaimer/2009/09/22/wpf-discussion-090922/ | CC-MAIN-2018-22 | en | refinedweb |
Using Activity Properties
There are two types of activity properties: meta properties and instance properties. A meta property is immutable at run time. Therefore, the property must be set to a literal value at design time. An instance property can be set at design time, or the property can be bound to instance data, in which case the actual value is not determined until run time. Instance properties can also be modified directly during workflow execution..
Property Attributes
The custom activity developer must be aware of a set of attributes when defining properties on an activity class. The Windows Workflow Foundation ValidationOptionAttribute type is used to facilitate automatic validation of meta property values for an activity. This also means that any ValidationOptionAttribute values set for instance-based dependency properties are ignored by the DependencyObjectValidator.
The following table lists the relevant .NET Framework attributes in the System.Workflow.ComponentModel namespace. | https://msdn.microsoft.com/en-us/library/ms734789(v=vs.85).aspx | CC-MAIN-2018-22 | en | refinedweb |
November 8, 2016 | Written by: Andrew Ferrier
Categorized: Compute Services | How-tos.
One of the things the IBM Bluemix platform (based on Cloud Foundry) supports is logging to external providers. Takehiko Amano’s excellent article on Splunk integration with Bluemix describes the solution.
Splunk is a popular platform for log aggregation. It provides an easy way to aggregate logs from multiple sources, providing a way to index and search them in multiple ways. You can find additional details on the Splunk website.
Takehiko’s solution is excellent, but still requires somewhere to deploy Splunk. However, Bluemix itself provides the IBM Containers offerings (based on Docker technology) where Splunk can be run. This isn’t suitable for robust production environments, where you’d want the logging to be externalised from the Bluemix infrastructure, but for “quick ‘n’ dirty” testing, it’s really useful. I’ve documented some steps below that you’ll need to follow to get this up and running with Splunk Light (the simpler, lighterweight edition of Splunk).
Prerequisites
- You have a git client installed locally.
- You have signed up for IBM Bluemix (Public) and have a login ID.
- You have installed Docker locally on your machine. This will give you access to the Docker command-line tools.
- You have installed the IBM Bluemix command-line tools, which will require first installing the prerequisite Cloud Foundry command line tools.
Instructions for this tutorial are written assuming you are using OS X, although they can probably be adapted to other operating systems fairly easily.
Build the Splunk Container
You need to build the Docker container for Splunk locally before pushing it up to the Bluemix containers repository. There’s already a well-established GitHub project for a Splunk Docker container, but we need to add the RFC5424 add-on as per Takehiko’s article to get Splunk to recognize the logging format.
I’ve already forked the GitHub repository and added most of the changes required to do that, but you will need to download the add-on itself first.
- Open a terminal and clone my repository, checking out the bluemix branch:
git clone -b bluemix
- Download the RFC 5424 add-on. You’ll need to sign up for a free splunk.com ID if you don’t already have one. Put the
.tgzfile in the splunklight directory inside your checked-out git repository.
- Now build the Docker image (which may take a little while):
cd/splunklight
docker build -t andrewferrier/splunk:latest-light .
(If you wish, you can substitute your own namespace prefix in place of
andrewferrier – as long as you use it consistently below).
Push the Splunk Container up to Bluemix and start running it
First, log into Bluemix and initialize the container runtime:
bx login
bx ic init
You’ll need to specify an organization and space to work within Bluemix.
Next, double-check what your IBM Containers “namespace” is. If you’ve worked with Containers before, you probably already have one specified. You can check it with
bx ic namespace-get. If you haven’t, you’ll need to set one with
bx ic namespace-set (I use
andrewferrier, for example; but you can set it as anything that’s meaningful to you—it will have to be unique across all users using shared Bluemix).
Now, tag your built image to prepare it for upload to the remote registry:
docker tag andrewferrier/splunk:latest-light
registry.ng.bluemix.net/andrewferrier/splunk:latest-light
(Note that the first
andrewferrier above is the prefix we specified previously when we built the image. The second is the namespace on Bluemix itself as just discussed. If you want to work with the UK instance of Bluemix, rather than the US one, change all references to
.ng. to
.eu-gb.)
Now, actually push the image to the remote registry (this may take a little while):
docker push registry.ng.bluemix.net/andrewferrier/splunk:latest-light
Next, we need to create some persistent volumes for both the
/opt/splunk/etc and the
/opt/splunk/var filesystems within the container:
bx ic volume create splunk-etc
bx ic volume create splunk-var
Start running the container. Notice that we’re exposing two TCP ports,
8000 (which will be used over HTTP to access the Splunk console), and
5140 (which will be used to push syslog messages from Bluemix to Splunk).
bx ic create -m 1024 -p 8000 -p 5140 --env SPLUNK_START_ARGS="--accept-license" --volume splunk-etc:/opt/splunk/etc --volume splunk-var:/opt/splunk/var registry.ng.bluemix.net/andrewferrier/splunk:latest-light
Once the container has started running, the Bluemix CLI will print out the container ID. You typically only need the first few characters—enough to uniquely identify it (e.g.
abc1234).
Now check which public IP addresses you have free to assign to the container:
bx ic ips
This should print a list of IPs (probably two if you’re working with a trial Bluemix account)—pick any IP which is not assigned to a container (if you have no unassigned addresses, you’ll either need to pay for more or unbind one from an existing container first). Now bind that IP address to your newly created container:
bx ic ip-bind 1.2.3.4 abc1234
Next, you’ll need to create a user-provided service to stream the logs from your application(s) to Splunk:
bx cf cups splunk -l syslog://1.2.3.4:5140
Setting up a TCP listener within Splunk
Now we need to set up a data listener within Splunk to listen for data on TCP port
5140 (essentially, this is the same procedure as Takehiko’s original article).
Open the Splunk console in a browser using the URL (obviously, change the IP address for the one you picked above). Log in using the default username/password pair
admin/changeme (Splunk will then encourage you to immediately change the password, which you should).
On the home screen, click “Add Data” to add a data source:
Select “Monitor”:
Select “TCP/UDP” to add a TCP-based data listener:
Enter Port
5140 (the same port we exposed from the Splunk Docker container above):
rfc5424_syslog as the source type (which corresponds to the Splunk add-on we installed previously). You may find it easiest to type
rfc into the dropdown box to select this. Also, you may want to create a new index to index data from Bluemix. In this case, I’ve created one called
bluemix:
Review the settings you’ve entered and add the data listener.
Clone and push a demo application
In this article, we’ll clone a sample Node.JS application locally and then push it to Bluemix, so we can bind it to the user-provided service we just defined to use it to test the Splunk integration.
cd <some_temporary_directory>
git clone
cd get-started-node
curl > manifest.yml
Now edit manifest.yml it to change name and host to a unique name (e.g.
TestAppForSplunkAF (note that this name must be unique within the whole of Bluemix, which is why I use my initials to make this unique).
You also need to modify the following line of the
server.js file to look like this:
var port = process.env.VCAP_APP_PORT || 8080;
(This ensures that the application will pick up the correct port number from the Bluemix deployed environment).
Now push the application up to Bluemix:
bx cf push
Bind that service to any application you wish:
bx cf bind-service TestAppForSplunkAF splunk
And restage each application:
bx cf restage TestApp
Testing the logging mechanism
Probably, just in the act of re-staging your application, you’ll already have generated some logs. However, to make things a bit more interesting, open the endpoint for your application (e.g. or similar, modify for the name of your application!) in a browser, and refresh it a few times.
Now, you should start to see your logging information appearing through Splunk. Assuming you set Splunk up as shown above, and created a new non-default index called
bluemix, you should simply be able to search for everything in the
bluemix index:
You should see some search results appear like this:
Further steps
The world is now your Oyster! You can use any standard Splunk searching mechanism to find logs.
Any questions or comments, please contact me @andrewferrier on Twitter.
Andrew is a Worldwide Bluemix Solution Architect, taking responsibility for end-to-end adoption of IBM Bluemix and providing guidance in the Bluemix Garage. Previously, he led the IBM Cloud Services Mobile Practice in Europe, working with IBM customers on the IBM MobileFirst platform. Andrew has presented extensively on Mobile, Dojo, REST, and Web APIs, contributing Intellectual Capital to the IBM and WebSphere communities, as well as writing two Redbooks, and numerous posts on IBM Mobile Tips ‘n’ Tricks ‘n’ Tricks and SOA Tips ‘n’ Tricks, both of which co-founded. When not in the Garage, Andrew can be found speaking at conferences, such as IBM InterConnect and the European WebSphere Technical Conference.
Recent Posts
- Promotion extended for IBM Cloud bare metal and virtual servers
- Deploying to IBM Cloud Private 2.1.0.2 with IBM Cloud Developer Tools CLI
- What the stats say about container development
- New fast and flexible Veeam backup solutions to IBM Cloud
- IBM Cloud Garage Method Field Guide
Archives
Deploying to IBM Cloud Private 2.1.0.2.. | https://www.ibm.com/blogs/bluemix/2016/11/using-splunk-docker-container-bluemix/ | CC-MAIN-2018-22 | en | refinedweb |
Once you create your own header file, you include it by surrounding the filename with straight double quotation marks in the directive:
#include "filename.h"
If no pathname is indicated, as in the previous code, the compiler will search the current directory, as well as other logical directories for that environment, to find the file. To eliminate this guesswork, you can use relative pathnames when including your own header files. To state that the file is in the same directory as the including (or parent) file, use an initial period followed by a slash:
#include "./filename.h"
If the file is in a subdirectory, begin with that subdirectory's name:
#include "includes/filename.h"
Finally, if the included file is located ...
No credit card required | https://www.safaribooksonline.com/library/view/c-programming-visual/0321287630/0321287630_ch08lev1sec6.html | CC-MAIN-2018-22 | en | refinedweb |
BufferedReader
public
class
BufferedReader
extends Reader
BufferedReader
BufferedReader (Reader in, int sz)
Creates a buffering character-input stream that uses an input buffer of the specified size.
BufferedReader
BufferedReader (Reader in)
Creates a buffering character-input stream that uses a default-sized input buffer.
Public methods
void close ()
Closes the stream and releases any system resources associated with it. Once the stream has been closed, further read(), ready(), mark(), reset(), or skip() invocations will throw an IOException. Closing a previously closed stream has no effect.
lines.
mark
void mark (int readAheadLimit)
Marks the present position in the stream. Subsequent calls to reset() will attempt to reposition the stream to this point.
markSupported
boolean markSupported ()
Tells whether this stream supports the mark() operation, which it does.
read
int read ()
Reads a single character.Line
String readLine ()
Reads a line of text. A line is considered to be terminated by any one of a line feed ('\n'), a carriage return ('\r'), or a carriage return followed immediately by a linefeed.
See also:
ready
boolean ready ()
Tells whether this stream is ready to be read. A buffered character stream is ready if the buffer is not empty, or if the underlying character stream is ready.
reset
void reset ()
Resets the stream to the most recent mark.
skip
long skip (long n)
Skips characters. | https://developer.android.com/reference/java/io/BufferedReader | CC-MAIN-2018-22 | en | refinedweb |
Ascend to the parent of the current node
#include <pps.h> pps_decoder_error_t pps_decoder_pop(pps_decoder_t *decoder);
libpps
The function pps_decoder_pop() ascends to the parent of the current object or array. Following this call, the current node will be the node that follows the object or array popped out of (that is, the sibling of the node that was current at the time this function was called).
QNX Neutrino | http://www.qnx.com/developers/docs/7.0.0/com.qnx.doc.pps.developer/topic/api/pps_decoder_pop.html | CC-MAIN-2018-22 | en | refinedweb |
Taming the Snoo: Playing with the Reddit API
Reddit is a social networking, entertainment, and news website where the content is almost exclusively submitted by users. According to this report, in February 2016 Reddit had 36 million user accounts, 231 million unique monthly visits, and 11.464 active communities. A recent study also showed that 80% of Reddit users get their news from there.
Reddit also offers its own API. This way, we can use all the information available on Reddit to enrich our own websites or build our own Reddit clients. In this article, we will tackle some basic Reddit API usage with PHP.
The Reddit API
The Reddit API is extensive and very well documented, from private methods that are only accessible through authentication (Reddit uses OAuth2), to public methods that we can use with a basic HTTP call.
In this article, we’ll first focus on the
search method. While this is a public call (it does not require authentication), it is also one of the most powerful ones, since it allows us to access all of the history of Reddit posts in every subreddit.
The search method
The
search method is available through a basic HTTP request and has a lot of properties. Looking at the documentation, we can see that it supports the
HTTP GET method and is available through[/r/subreddit]/search
We also have the following arguments available:
after,
before,
count,
include_facets,
limit,
q,
restrict_sr,
show,
sort,
sr_detail,
syntax,
t, and
type. The table below can be found in the documentation, and shows every argument with more detail.
We will focus on the
q,
limit,
sort and
restrict_sr arguments.
The
q argument is the most important one and indicates the query for which we will search the subreddit in question. An example of usage would be:
This particular call will search for the
oop expression in the
php subreddit. If you try to make the call using your browser, you will see the results (just copy and paste the link in your browser).
The
limit argument limits the number of posts that the returned list will have. An example of usage would be:
This particular search would return the first 5 results of searching for the
oop expression in the
php subreddit.
The
sort argument will sort the searched posts using one of the five Reddit order properties:
hot,
new,
relevance and
top. So, if we want to search the
php subreddit for newer
oop posts we could make the following call:
The
restrict_sr is a boolean value that indicates whether or not we want to restrict our search to the current subreddit. If we pass 0, we will be searching all of Reddit.
All the properties can be combined to make more refined searches.
Adding PHP
Being able to call the API through our browser is fun, but we want something more powerful. We want to be able to parse and use the information we get in a lot of different ways.
For this example on using the Reddit API with PHP we could use cURL, but while having
cURL skills can be useful, nowadays it is a rather outdated tool. We will use a library called Guzzle and install it with Composer.
composer require guzzlehttp/guzzle
For this part of the project, not only will we use Guzzle, we will also use the rest of the arguments we discussed earlier.
<?php

namespace ApiReddit;

require_once './vendor/autoload.php';

class Searcher
{
    /**
     * Queries the Reddit search API for posts in a subreddit.
     *
     * @param string $subreddit The subreddit to search (default 'php')
     * @param string $query     The term to search for
     * @param string $options   The sort order (hot|new|relevance|top)
     * @param int    $results   The maximum number of results to return
     *
     * @return string Raw JSON body returned by the Reddit API
     */
    public function execSearch($subreddit = 'php', $query = '', $options = '', $results = 10)
    {
        // NOTE(review): 'verify' => false disables TLS certificate
        // verification; kept to match the original behaviour, but it
        // should be enabled in production.
        $client = new \GuzzleHttp\Client([
            'headers' => ['User-Agent' => 'testing/1.0'],
            'verify' => false,
        ]);

        // Build the query string from an array instead of manual string
        // concatenation so every value is properly URL-encoded.
        // (The base URL was stripped from the original snippet; this is
        // Reddit's public search endpoint.)
        $response = $client->request('GET', 'https://www.reddit.com/r/' . $subreddit . '/search.json', [
            'query' => [
                'q' => $query,
                'sort' => $options,
                'restrict_sr' => 1,
                'limit' => $results,
            ],
        ]);

        // Cast the PSR-7 stream to a plain string so callers get the
        // JSON text directly (getBody() takes no arguments in Guzzle 6+).
        return (string) $response->getBody();
    }
}
In this search, we added more arguments to further refine our search. Now we have subreddit, options, and results (which is set to 10 by default).
Next we will create an
index.php file that will query the Reddit API. We will use Twig to render our view and show the results in a table. Then we will create a
/templates folder in the root of our project. This folder will hold our Twig templates.
composer require twig/twig
The
index.php file:
<?php

require __DIR__ . '/vendor/autoload.php';

use ApiReddit\Searcher;

// Execute a new search: the 5 "hot" posts matching "composer" in /r/php.
$search = new Searcher();
$json = $search->execSearch('php', 'composer', 'hot', 5);
$data = json_decode($json);

// Guard against a malformed or empty API response so the template never
// dereferences null (json_decode returns null on invalid JSON).
$results = ($data !== null && isset($data->data->children))
    ? $data->data->children
    : array();

// Point Twig at the templates directory and render the results table.
$loader = new Twig_Loader_Filesystem(__DIR__ . '/templates');
$twig = new Twig_Environment($loader, array());

// Renders our view
echo $twig->render('index.twig', array(
    'results' => $results,
));
After loading Twig, we tell it where we store our templates and to render
index.twig.
We also want to create a
resources/style.css file to style our results. This file contains the following:
/* Cells and headers of the search-results table. */
#posts td,
#posts th {
    border: 1px solid #ddd;
    text-align: left;
    padding: 8px;
}

/* Header row: taller padding, green background, white text. */
#posts th {
    padding-top: 11px;
    padding-bottom: 11px;
    background-color: #4CAF50;
    color: white;
}
Finally, we will create our template file keeping in mind both our results and the CSS. Inside the
/templates folder, let’s create an
index.twig file:
<!DOCTYPE html>
<html>
<head>
    {# NOTE(review): the original meta tag was truncated to "<meta http-" by
       scraping; a plain charset declaration is the modern equivalent. #}
    <meta charset="utf-8">
    <title>Reddit Search Table</title>
    <link rel="stylesheet" type="text/css" href="resources/style.css">
</head>
<body>
    <table id='posts'>
        <tr>
            <th>Title</th>
            <th>Ups</th>
            <th>Downs</th>
            <th>Created At</th>
            <th>Subreddit</th>
            <th>Number of Comments</th>
            <th>Link</th>
        </tr>
        {% for item in results %}
        <tr>
            <td>{{ item.data.title }}</td>
            <td>{{ item.data.ups }}</td>
            <td>{{ item.data.downs }}</td>
            <td>{{ item.data.created_utc }}</td>
            {# Bug fix: the Reddit API listing field is "subreddit" (singular);
               "subreddits" does not exist, so this cell was always empty. #}
            <td>{{ item.data.subreddit }}</td>
            <td>{{ item.data.num_comments }}</td>
            <td>{{ item.data.permalink }}</td>
        </tr>
        {% endfor %}
    </table>
</body>
</html>
Our final result is here:
Adding authentication
While the
search method can be very powerful, the Reddit API has dozens more features we can explore, but most of them require authentication. To be able to access all that functionality, we first need a Reddit account, so please make sure you have one before continuing.
After we have an account and before we are able to access the API, there’s some configuration work to be done. Log into your account, and in the top right corner you’ll see the “preferences” button. Click it and the go to the “Apps” tab, then click “Create”.
Fill in the information (and be sure to remember that the Redirect URI will have to be exactly the one we are going to use in our application). We can leave the
about url blank.
After that, click
Create App. This will give us access to a client ID and a secret token. We will be using this information to authenticate via OAuth2 to the API. For the Oauth2 authentication we will use a very well known package,
adoy/oauth2. This package is a light PHP wrapper for the OAuth 2.0 protocol (based on OAuth 2.0 Authorization Protocol draft-ietf-oauth-v2-15).
composer require adoy/oauth2
Now that we have the tools for using Oauth2, let’s create a file called
Oauth.php in the root of our project with the following content:
<?php

require_once './vendor/autoload.php';

use ApiReddit\Authenticator;

// NOTE(review): both endpoint URLs were blank in the original source. These
// are Reddit's documented OAuth2 endpoints — confirm against the API docs.
$authorizeUrl = 'https://www.reddit.com/api/v1/authorize';
$accessTokenUrl = 'https://www.reddit.com/api/v1/access_token';

// Credentials from the app created under reddit.com -> preferences -> apps.
$clientId = 'Your Client Id';
$clientSecret = 'Your Secret';

// Reddit requires a descriptive User-Agent on every API request.
$userAgent = 'RedditApiClient/0.1 by YourName';

// Must match the redirect URI configured for the app EXACTLY.
$redirectUrl = "Your redirect url";

$auth = new Authenticator(
    $authorizeUrl,
    $accessTokenUrl,
    $clientId,
    $clientSecret,
    $userAgent,
    $redirectUrl
);

$auth->authenticate();
We are creating an instance of
Authenticator and calling the
authenticate() method. We are also autoloading the class by adding it to Composer. For this to work, let's create the
Authenticator.php class file in our
/src folder.
<?php

namespace ApiReddit;

/**
 * Drives the OAuth2 authorization-code flow against the Reddit API using the
 * adoy/oauth2 client.
 */
class Authenticator
{
    // Declared explicitly: assigning undeclared ("dynamic") properties is
    // deprecated as of PHP 8.2 and hides typos.
    private $authorizeUrl;
    private $accessTokenUrl;
    private $clientId;
    private $clientSecret;
    private $userAgent;
    private $redirectUrl;

    /**
     * @param string $authorizeUrl   Reddit's OAuth2 authorize endpoint
     * @param string $accessTokenUrl Reddit's OAuth2 token endpoint
     * @param string $clientId       App client ID from reddit preferences
     * @param string $clientSecret   App secret from reddit preferences
     * @param string $userAgent      Descriptive User-Agent required by Reddit
     * @param string $redirectUrl    Redirect URI configured for the app
     */
    public function __construct($authorizeUrl, $accessTokenUrl, $clientId, $clientSecret, $userAgent, $redirectUrl)
    {
        $this->authorizeUrl = $authorizeUrl;
        $this->accessTokenUrl = $accessTokenUrl;
        $this->clientId = $clientId;
        $this->clientSecret = $clientSecret;
        $this->userAgent = $userAgent;
        $this->redirectUrl = $redirectUrl;
    }

    /**
     * Runs the two-leg OAuth2 flow: redirect the user to Reddit's consent
     * page, then (on return with ?code=...) exchange the code for a token.
     */
    public function authenticate()
    {
        $client = new \OAuth2\Client(
            $this->clientId,
            $this->clientSecret,
            \OAuth2\Client::AUTH_TYPE_AUTHORIZATION_BASIC
        );

        // Reddit rejects requests that lack a descriptive User-Agent.
        $client->setCurlOption(CURLOPT_USERAGENT, $this->userAgent);

        if (!isset($_GET["code"])) {
            // First leg: send the user to Reddit's authorization page.
            // "identity" scope grants read access to the user's own info.
            $authUrl = $client->getAuthenticationUrl($this->authorizeUrl, $this->redirectUrl, array(
                "scope" => "identity",
                "state" => "SomeUnguessableValue"
            ));
            header("Location: " . $authUrl);
            die("Redirect");
        } else {
            // Second leg: intentionally commented out in the article until
            // getUserPreferences() is implemented.
            //$this->getUserPreferences($client, $this->accessTokenUrl);
        }
    }
}
In the
Oauth.php file, we are initializing our project variables with all the data needed to authenticate through the API.
Authenticator.php is where the magic happens.
We are creating a new OAuth2 client instance with our ID and secret using
adoy. After that is basic OAuth logic: we use an authentication URL to execute the authentication and a redirect one to where we will be redirected after authentication.
One important thing to notice is the use of a
scope in our call. The scope is basically the scope of the functionality we want to have access to. In this case, we are using
identity because, in this example, we will want to fetch our own user information. All the possible actions and respective scopes are explained in the API documentation.
Last but not least, we have a line that is commented. This was on purpose. The
getUserPreferences method will make the call to the API method we want to use. So let’s uncomment that line, and implement the method below.
/**
 * Requests the currently authenticated user's information from the Reddit API.
 *
 * @param \OAuth2\Client $client      The configured OAuth2 client instance
 * @param string         $accessToken The access-token endpoint URL
 */
public function getUserPreferences( $client, $accessToken )
{
    // Exchange the authorization code Reddit appended to the redirect for an
    // access token.
    $params = array("code" => $_GET["code"], "redirect_uri" => $this->redirectUrl);
    $response = $client->getAccessToken($accessToken, "authorization_code", $params);

    $accessTokenResult = $response["result"];
    $client->setAccessToken($accessTokenResult["access_token"]);

    // Bug fix: this method lives inside "namespace ApiReddit", so the class
    // constant needs a leading backslash — the original "OAuth2\Client::..."
    // would resolve to ApiReddit\OAuth2\Client and fail.
    $client->setAccessTokenType(\OAuth2\Client::ACCESS_TOKEN_BEARER);

    // NOTE(review): the endpoint URL was blank in the original source;
    // https://oauth.reddit.com/api/v1/me.json is the documented "identity"
    // endpoint — confirm against the Reddit API documentation.
    $response = $client->fetch("https://oauth.reddit.com/api/v1/me.json");

    echo('<strong>Response for fetch me.json:</strong><pre>');
    print_r($response);
    echo('</pre>');
}
We are obtaining the access token and using it in our call to fetch the authenticated user's information (the /api/v1/me endpoint). This GET method will return the identity of the user currently authenticated. The answer will be a JSON array.
If we run it, we will be taken to a login page, just like it is supposed to happen with OAuth. After logging in, we will be prompted with this page:
Just click allow, and if everything went right, we should have a json array containing all the information about the user we just authenticated with.
This is how we authenticate with the Reddit API. We can now take a deeper look at the documentation and check all the great things we can make with it.
All the code can be found in this github repository.
Conclusion
We learned how to use the search functionality of the Reddit API and took a first look at authenticating and using the methods that require a logged in user. But, we just scratched the surface.
The possibilities are huge because the Reddit API gives us access to an almost endless knowledge pool. Be sure to check the entire Reddit API documentation as it offers much more, and do let us know if you build anything interesting! | https://www.sitepoint.com/taming-the-snoo-playing-with-the-reddit-api/?utm_source=sitepoint&utm_medium=relatedinline&utm_term=&utm_campaign=relatedauthor | CC-MAIN-2018-22 | en | refinedweb |
Multithreading and synchronization is a very important topic for any Java programmer. Good knowledge of multithreading, synchronization, and thread-safety can put you ahead of other developers; at the same time, it's not easy to master these concepts. In fact, writing correct concurrent code is one of the hardest things in programming. Since Java provides different constructs to provide synchronization and locking, e.g. the volatile keyword, atomic variables, explicit locking using the java.util.concurrent.locks.Lock interface and its popular implementations e.g. ReentrantLock and ReentrantReadWriteLock, it becomes even more important to understand the difference between synchronized and these other constructs. Remember, a clear understanding of synchronization is a must to write correct concurrent code in Java, which is free of multithreading issues like deadlock, race conditions and thread-safety problems. I am sure the things learned in this Java synchronization tutorial will help. Once you have gone through this article, you can further read Java Concurrency in Practice to develop your concepts. That's one of those books which every Java developer must read.
What is Synchronization in Java? Synchronization in Java is an important concept since Java is a multi-threaded language where multiple threads run in parallel to complete program execution. In a multi-threaded environment, synchronization of Java objects or synchronization of Java classes becomes extremely important. Synchronization in Java is possible by using the Java keywords "synchronized" and "volatile". Concurrent access of shared objects in Java introduces two kinds of errors: thread interference and memory consistency errors, and to avoid these errors you need to properly synchronize your Java object to allow mutually exclusive access to the critical section for two threads. By the way, this Java Synchronization tutorial is in continuation of my article How HashMap works in Java and Difference between HashMap and Hashtable in Java; if you haven't read them already, you may find some useful information based on my experience in Java Collections.
Why do we need Synchronization in Java?
If your code is executing in a multi-threaded environment, you need synchronization for objects which are shared among multiple threads, to avoid any corruption of state or any kind of unexpected behavior. Synchronization in Java will only be needed if the shared object is mutable; if your shared object is either read-only or immutable, then you don't need synchronization, despite running multiple threads. The same is true for what the threads are doing with the object: if all the threads are only reading values, then you don't require synchronization in Java. The JVM guarantees that Java synchronized code will only be executed by one thread at a time. In summary, the Java synchronized keyword provides the following functionality essential for concurrent programming:
Synchronized keyword in Java
Example of Synchronized Method in Java
Using synchronized keyword along with method is easy just apply synchronized keyword in front of method. What we need to take care is that static synchronized method locked on class object lock and non static synchronized method locks on current object (this). So it’s possible that both static and non static java synchronized method running in parallel. This is the common mistake a naive developer do while writing Java synchronized code.
public class Counter {

    private static int count = 0;

    /**
     * Returns the current count. Being static synchronized, this method
     * locks on the Counter.class object.
     */
    public static synchronized int getCount() {
        return count;
    }

    /**
     * Sets the current count. (The typo "synchoronized" and the missing
     * return type in the original are fixed so the example compiles.)
     *
     * Being an instance synchronized method, this locks on "this" — a
     * DIFFERENT lock than getCount() uses. That is exactly the broken
     * synchronization the article illustrates: the two methods can run in
     * parallel, so access to count is not mutually exclusive.
     */
    public synchronized void setCount(int count) {
        Counter.count = count;
    }
}
In this example of Java synchronization code is not properly synchronized because both getCount() and setCount() are not getting locked on same object and can run in parallel which may results in incorrect count. Here getCount() will lock in Counter.class object while setCount() will lock on current object (this). To make this code properly synchronized in Java you need to either make both method static or non static or use java synchronized block instead of java synchronized method.By the way this is one of the common mistake Java developers make while synchronizing their code.
Example of Synchronized Block in Java
Using synchronized block in java is also similar to using synchronized keyword in methods. Only important thing to note here is that if object used to lock synchronized block of code, Singleton.class in below example is null then Java synchronized block will throw a NullPointerException.
public class Singleton{ private static volatile Singleton _instance; public static Singleton getInstance(){ if(_instance == null){ synchronized(Singleton.class){ if(_instance == null) _instance = new Singleton(); } } return _instance; }
This is a classic example of double checked locking in Singleton. In this example of Java synchronized code, we have made only the critical section (the part of code which is creating the instance of the singleton) synchronized and saved some performance. If you make the whole method synchronized, then every call of this method will be blocked, while you only need blocking to create the singleton instance on the first call. By the way, this is not the only way to write a thread-safe singleton in Java. You can use an Enum, or eager initialization, to avoid thread-safety issues during instantiation. Even the above code would not behave as expected on older JVMs because, prior to Java 1.5, double checked locking was broken and even with a volatile variable you could view a half-initialized object. The introduction of the Java memory model and the happens-before guarantee in Java 5 solves this issue. To read more about Singleton in Java, see the linked article.
Important points of synchronized keyword in Java
2. You can use java synchronized keyword only on synchronized method or synchronized block.
3. When ever a thread enters into java synchronized method or block it acquires a lock and whenever it leaves java synchronized method or block it releases the lock. Lock is released even if thread leaves synchronized method after completion or due to any Error or Exception.
4. A Java thread acquires an object-level lock when it enters an instance synchronized Java method and acquires a class-level lock when it enters a static synchronized Java method. 5. A Java synchronized block will throw java.lang.NullPointerException if the object used to lock the block (e.g. myInstance) is null.
7. One Major disadvantage of Java synchronized keyword is that it doesn't allow concurrent read, which can potentially limit scalability. By using concept of lock stripping and using different locks for reading and writing, you can overcome this limitation of synchronized in Java. You will be glad to know that java.util.concurrent.locks.ReentrantReadWriteLock provides ready made implementation of ReadWriteLock in Java.
8. One more limitation of java synchronized keyword is that it can only be used to control access of shared object within the same JVM. If you have more than one JVM and need to synchronized access to a shared file system or database, the Java synchronized keyword is not at all sufficient. You need to implement a kind of global lock for that.
9. Java synchronized keyword incurs performance cost. Synchronized method in Java is very slow and can degrade performance. So use synchronization in java when it absolutely requires and consider using java synchronized block for synchronizing critical section only.
10. Java synchronized block is better than java synchronized method in Java because by using synchronized block you can only lock critical section of code and avoid locking whole method which can possibly degrade performance. A good example of java synchronization around this concept is getInstance() method Singleton class. See here.
11. Its possible that both static synchronized and non static synchronized method can run simultaneously or concurrently because they lock on different object.
12. From java 5 after change in Java memory model reads and writes are atomic for all variables declared using volatile keyword (including long and double variables) and simple atomic variable access is more efficient instead of accessing these variables via synchronized java code. But it requires more care and attention from the programmer to avoid memory consistency errors.
13. Java synchronized code could result in deadlock or starvation while accessing by multiple thread if synchronization is not implemented correctly. To know how to avoid deadlock in java see here.
14. According to the Java language specification you can not use Java synchronized keyword with constructor it’s illegal and result in compilation error. So you can not synchronized constructor in Java which seems logical because other threads cannot see the object being created until the thread creating it has finished it.
15. You cannot apply java synchronized keyword with variables and can not use java volatile keyword with method.
16. Java.util.concurrent.locks extends capability provided by java synchronized keyword for writing more sophisticated programs since they offer more capabilities e.g. Reentrancy and interruptible locks.
17. Java synchronized keyword also synchronizes memory. In fact java synchronized synchronizes the whole of thread memory with main memory.
18. Important method related to synchronization in Java are wait(), notify() and notifyAll() which is defined in Object class. Do you know, why they are defined in java.lang.object class instead of java.lang.Thread? You can find some reasons, which make sense.
19. Do not synchronize on non final field on synchronized block in Java. because reference of non final field may change any time and then different thread might synchronizing on different objects i.e. no synchronization at all. example of synchronizing on non final field :
private String lock = new String("lock"); synchronized(lock){ System.out.println("locking on :" + lock); }
any if you write synchronized code like above in java you may get warning "Synchronization on non-final field" in IDE like Netbeans and InteliJ
20. Its not recommended to use String object as lock in java synchronized block because string is immutable object and literal string and interned string gets stored in String pool. so by any chance if any other part of code or any third party library used same String as there lock then they both will be locked on same object despite being completely unrelated which could result in unexpected behavior and bad performance. instead of String object its advised to use new Object() for Synchronization in Java on synchronized block.
private static final String LOCK = "lock"; //not recommended private static final Object OBJ_LOCK = new Object(); //better public void process() { synchronized(LOCK) { ........ } }
21. From Java library Calendar and SimpleDateFormat classes are not thread-safe and requires external synchronization in Java to be used in multi-threaded environment.
Probably most important point about Synchronization in Java is that, in the absence of synchronized keyword or other construct e.g. volatile variable or atomic variable, compiler, JVM and hardware are free to make optimization, assumption, reordering or caching of code and data, which can cause subtle concurrency bugs in code. By introducing synchronization by using volatile, atomic variable or synchronized keyword, we instruct compiler and JVM to not to do that.
Update 1: Recently I have been reading several Java Synchronization and Concurrency articles in internet and I come across jeremymanson's blog which works in google and has worked on JSR 133 Java Memory Model, I would recommend some of this blog post for every java developer, he has covered certain details about concurrent programming , synchronization and volatility in simple and easy to understand language, here is the link atomicity, visibility and ordering.
Update 2: I am grateful to my readers, who has left some insightful comments on this post. They have shared lots of good information and experience and to provide them more exposure, I am including some of there comments on main article, to benefit new readers.
@Vikas wrote.
3. on similar note if thread is waiting for lock to acquired there is no way to interrupt the thread.
All these limitation of synchronized keyword is addressed and resolved by using ReadWriteLock and ReentrantLock in Java 5.
@George wrote
Just my 2 cents on your great list of Java Synchronization facts and best practices :
@George wrote
Just my 2 cents on your great list of Java Synchronization facts and best practices :
1) synchronized keyword in internally implemented using two byte code instructions MonitorEnter and MonitorExit, this is generated by compiler. Compiler also ensures that there must be a MonitorExit for every MonitorEnter in different code path e.g. normal execution and abrupt execution, because of Exception.
2) java.util.concurrent package different locking mechanism than provided by synchronized keyword, they mostly used ReentrantLock, which internally use CAS operations, volatile variables and atomic variables to get better performance.
2) java.util.concurrent package different locking mechanism than provided by synchronized keyword, they mostly used ReentrantLock, which internally use CAS operations, volatile variables and atomic variables to get better performance.
3) With synchronized keyword, you have to leave the lock, once you exist a synchronized method or block, there is no way you can take the lock to other method. java.util.concurrent.locks.ReentrantLock solves this problem by providing control for acquiring and releasing lock, which means you can acquire lock in method A and can release in method B, if they both needs to be locked in same object lock. Though this could be risky as compiler will neither check nor warn you about any accidental leak of locks. Which means, this can potentially block other threads, which are waiting for same lock.
4) Prefer ReentrantLock over synchronized keyword, it provides more control on lock acquisition, lock release and better performance compared to synchronized keyword.
4) Prefer ReentrantLock over synchronized keyword, it provides more control on lock acquisition, lock release and better performance compared to synchronized keyword.
5) Any thread trying to acquire lock using synchronized method will block indefinitely, until lock is available. Instead this, tryLock() method of java.util.concurrent.locks.ReentrantLock will not block if lock is not available.
Having said that, I must say, lots of good information.
Having said that, I must say, lots of good information.
Recommend Books to learn Synchronization and Concurrency in JavaSynchronization and Concurrency is complex topic in Java and it's not easy to master them. Even more experienced Java developers struggle to write correct concurrent code in Java. I would highly recommend following Java books to master multi-threading, synchronization and Concurrency.
- Java Concurrency in Practice By Brian Goeatz and team
- Effective Java by Joshua Bloach
- Java Threads By Scott Oaks and Henry Wong
30 comments :
I think that the "double checked locking" example that you gave have to use the volatile keyword to be correct.
Hi alias , you are correct to keep Singleton as Singleton that has to be volatile but that example was just to show use of synchronized block instead of getInstance() method but no harm on putting volatile there :) Thanks
You cannot use volatile on a methode, the keyword is for variable. Your Singleton example won't compile.
@Arnaud Vandyck , Thanks for pointing out, It was a typo , volatile indeed only applicable to variable and was intended for Singleton instance but some how placed on method signature. I will correct them. Thanks
@Anonymous, good to hear that you like the post. Indeed synchronization is very important topic specially if you are working with online stock trading companies or any electronic trading platform which are designed to be concurrent for high volume and low latency.
Nice Blog on synchronisation(for that matter a lot of topics)...got to know the knick knacks of java ...thanx to u:)
Keep up ur good work. Kudos!
What is synchronization in Java and Why do we need it ? Can you give example of synchronization in java ? What will happen if we don't have synchronization in Java ???
@Anonymous , Synchronization in Java means allowing controlled access of a shared resource to avoid problem like deadlock. java is a multi-threaded language which gives you ability to write high performance concurrent programs which can benefit from high end multi-core processors. you can use synchronized and volatile keyword to achieve synchronization in java. and if you read this tutorial you will see example of using synchronized block and synchronized method in java.
s/synchoronized/synchronized/ ;)
thanks Peter done s/synchoronized/synchronized/, only one match found :)
Thanks for giving the detailed explanation on thread synchronization. If possible, please do expand the article by showing usefulness of wait(), notify(), notifyAll() methods in synchronization.
Most Informative article on Java Synchronization, Never know there is so much to learn on synchronization in Java. you have covered almost everything form synchronized method to synchronized block to some really less known points on Synchronization. Though its more focus on Java and Synchronization , this kind of article on any topic is quite useful.
Hi Abdul, Thanks for your comment and you like this Java synchronization tutorial. keep the good work in your blog as well.
You have quite a few compiler errors in your Counter class. But the biggest problem is you are attempting to access 'this' from a static method. Instead you should use the class name Counter since count is a static variable.
Hi Jeff R, Thanks for your comment. I see your point, this should not be used to access static variable but code is not written to be compilation there. It just to show that static and non static synchronized method lock on two different object so it effectively means no mutual exclusive access because two thread can concurrently run static and non static methods. Anyway its always better to have clean code , so I will correct that. thanks for pointing.
nice selection of book.These books are very helpful for experience persons who are working on java
Thanks skplife, Glad you like the books, Effective Java and Java Concurrency in Practice for multi-threading and synchronization is must read for any Java developer.
Interesting article. The heading promises that it will tell us how synchronization works. Instead it only tells us how to use synchronization. Disappointed...
Hello there :) me again...
There's a small gap here:
"Lock is released .... or due to any Error or Exception"
The thread does NOT release the lock when an exception is thrown, and the program stalls indefinitely.hence a kind of deadlock appears, since the second thread cannot acquire the Lock that thread 1 acquired, and did not released it;
I coded a small example in this morning, we can check together :)
================================================
// Commenter's demo attempting to show a thread "holding" a lock across an
// exception. NOTE(review): per the JLS, a synchronized block DOES release its
// monitor when an exception propagates out of it. The hang this demo produces
// has a different cause: Foo throws before setting isLocked, so notify() is
// never called and Bar waits forever on a condition that never becomes true.
class HoldingLockWhenException {
// Shared condition flag, guarded by the outer object's monitor.
private boolean isLocked = false;
private class Foo implements Runnable {
public void run() {
// Lock the OUTER instance (same monitor Bar uses).
synchronized(HoldingLockWhenException.this) {
System.out.println(Thread.currentThread().getName() + " acquired the Lock on outer this");
// isLocked starts false, so this always throws — the two lines
// below (set flag + notify) are unreachable as written.
if(!isLocked)
throw new IllegalThreadStateException();
isLocked = true;
HoldingLockWhenException.this.notify();
}
}
}
private class Bar implements Runnable {
public void run() {
// Same outer monitor as Foo.
synchronized(HoldingLockWhenException.this) {
System.out.println(Thread.currentThread().getName() + " acquired the Lock on outer this");
// Standard wait-in-a-loop idiom; wait() releases the monitor while
// blocked. Since Foo never notifies, this loop never exits.
while(!isLocked) {
try {
HoldingLockWhenException.this.wait();
} catch(InterruptedException interx) {}
}
System.out.println(Thread.currentThread().getName() + " process further");
}
}
}
public static void main(String[] argv) {
// Both runnables share one outer instance, hence one monitor.
HoldingLockWhenException outer = new HoldingLockWhenException();
Thread run1 = new Thread(outer.new Foo());
Thread run2 = new Thread(outer.new Bar());
run1.start();
run2.start();
}
}
================================================
@Anonymous
The lock release occurs even if the return was caused by an uncaught exception.
Source :
If we used synchronized in singleton class it will allow to access the object for only one user. if 100 users wants to access the same object then it has to take some seconds of time to release the lock for every user like that how much time it takes for 100th user? In this scenario, how can we handle ?
I'm not understanding why the double checked locking code would throw a null pointer exception. The code is identical to the ones here
// Demo (from a reader question) of why synchronized(this) does not serialize
// these two threads: each Letters thread locks its OWN instance, so the two
// synchronized blocks use different monitors and provide no mutual exclusion
// between the threads. Interleaved output such as XYYX is therefore possible.
public class Letters extends Thread {
private String name;
public Letters(String name) { this.name = name; }
// Prints the name twice; NOT atomic across threads — the other thread's
// prints can land between these two calls.
public void write() {
System.out.print(name);
System.out.print(name);
}
public static void main(String[] args) {
// Two distinct Letters objects => two distinct "this" locks.
new Letters("X").start();
new Letters("Y").start();
}
// Locks this thread's own instance only; to serialize the output the
// threads would need to synchronize on one shared object (e.g. a static
// lock or Letters.class).
public void run() { synchronized(this) { write(); } }
}
Output i m getting sometimes is XYYX . I am thinking because write method is synchronized here , so write method would be locked on current object and it should be either XXYY or YYXX. Tell me what's wrong in my concept?? I m not getting here.
@Anonymous
- I don't see synchronized keyword on write() method.
- you are using two different thread and two different object, which means there is effectively no locking. In your current code, try making write() method static synchronized.
In one of Interview, I was asked couple of questions from Java Synchronization, It would be good if you can answer this here:
Can you synchronize on local variable or non final variable in Java? If you do, what problem will you face?
Why Object Lock should be private, if used to lock a critical section?
My answer to the first question was YES. Java Synchronization allows you to use local variable or non final field to lock synchronized block, but I couldn't answer risk part.
For second question also, I got confused that what will happen if made Object lock non public or protected?
Hi...i have some issue with synchronization....please help me.
My application is a swing application. I have a button. The button action code is below
LabelStatus.setText("searching");
BluetoothBrowser.main(null);
BluetoothBrowser class contain synchronized block. Now my problem is when i am click on the button it doesn't display the text on the LabelStatus. What is wrong with my code
check if BluetoothBrowser.main(null); is blocking call or not, since event handling and graphics drawing is done by Event dispatcher thread, it won't draw anything until it return from your action listener code.
@Radhika
Yes we can use local variable or non final variable for synchronization but they will or may lead you to the problems.
1. Since new local variables gets created for every call to the method so there is no point in synchronizing on local variable because another thread running in same method will be having different object in same variable.
2. Similarly for non final variable just imagine the case if i change the object reference in non final variable.
I hope you can think of the problem now. :)
Vipni
For anyone interested in what the JVM does:
Under the section "Important points of synchronized keyword in Java" #19 'Do not synchronize on non final field on synchronized block in Java' example, isn't the String class object final by default. | http://javarevisited.blogspot.com/2011/04/synchronization-in-java-synchronized.html?showComment=1304249650583 | CC-MAIN-2015-40 | en | refinedweb |
policy argument determiens the policy that will be used to update the message model. The default value, compat32 maintains backward compatibility with the Python 3.2 version of the email package. For more information see the policy documentation.
Changed in version 3.3: The policy keyword argument was added. that is required by the unix mbox format. For more flexibility, instantiate a Generator instance and use its flatten() method directly. For example:
from io import StringIO from email.generator import Generator fp = StringIO() g = Generator(fp, mangle_from_=True, maxheaderlen=60) g.flatten(msg) text = fp.getvalue()
Equivalent to as_string(unixfrom=True).
Return True if the message’s payload is a list of sub-Message objects, otherwise return False. When is_multipart() returns False, the payload should be a string object.
Set the message’s envelope header to unixfrom, which should be a string.
Return the message’s envelope header. Defaults to None if the envelope header was never set.
Add the given payload to the current payload, which must be None or a list of Message objects before the call. After the call, the payload will always be a list of Message objects. If you want to set the payload to a scalar object (e.g. a string), use set_payload() instead., the payload is returned as-is (undecoded). In all cases the returned value is binary data. If the message is a multipart and the decode flag is True, then None is returned. If the payload is base64 and it was not perfectly formed (missing padding, characters outside the base64 alphabet), then an appropriate defect will be added to the message’s defect property (InvalidBase64PaddingDefect or InvalidBase64CharactersDefect, respectively).
When decode is False (the default) the body is returned as a string without decoding the Content-Transfer-Encoding. However, for a Content-Transfer-Encoding of 8bit, an attempt is made to decode the original bytes using the charset specified by the Content-Type header, using the replace error handler. If no charset is specified, or if the charset given is not recognized by the email package, the body is decoded using the default ASCII charset.
Set the entire message object’s payload to payload. It is the client’s responsibility to ensure the payload invariants. Optional charset sets the message’s default character set; see set_charset() for details. parameter.
Return the Charset instance associated with the message’s payload. objects with a charset of unknown-8bit.
Return the total number of headers, including duplicates.!'
Delete all occurrences of the field with name name from the message’s headers. No exception is raised if the named field isn’t present in the headers.
Return a list of all the message’s header field names.
Return a list of all the message’s field values.
Return a list of 2-tuples containing all the message’s field headers and values.
Return the value of the named header field. This is identical to __getitem__() except that optional failobj is returned if the named header is missing (defaults to None).
Here are some additional useful methods:
Return a list of all the values for the field named name. If there are no such named headers in the message, failobj is returned (defaults to None). a header. Replace the first header found in the message that matches _name, retaining header order and field name case. If no matching header was found, a KeyError is raised..
Return the message’s main content type. This is the maintype part of the string returned by get_content_type().
Return the message’s sub-content type. This is the subtype part of the string returned by get_content_type().
Return the default content type. Most messages have a default content type of text/plain, except for messages that are subparts of multipart/digest containers. Such subparts have a default content type of message/rfc822.
Set the default content type. ctype should either be text/plain or message/rfc822, although this is not enforced. The default content type is not stored in the Content-Type header..
Remove the given parameter completely from the Content-Type header. The header will be re-written in place without the parameter or its value. All values will be quoted as necessary unless requote is False (the default is True). Optional header specifies an alternative to Content-Type.. email.utils.unquote().
Return the value of the boundary parameter of the Content-Type header of the message, or failobj if either the header is missing, or has no boundary parameter. The returned string will always be unquoted as per email.utils.unquote().. | https://wingware.com/psupport/python-manual/3.3/library/email.message.html | CC-MAIN-2015-40 | en | refinedweb |
Asked by:
Macros run slow on Word 2007
Question
All replies
Word 2007 does have the ability to save a file in Word 97-2003 format. I have no idea however whether or not that will change the speed at which the macro runs.
-- Hope this helps.
Doug Robbins - Word MVP,
dkr[atsymbol]mvps[dot]org
Posted via the Community Bridge
"jgugenheim" wrote in message news:4a5d1400-33aa-4a0d-b30e-482f02e76f1b@communitybridge.codeplex.com...
Doug Robbins - Word MVP dkr[atsymbol]mvps[dot]org
Hi - and thanks for your replies.
The problem seems to me to be that when Word 2007 saves the files in 2003 format it leaves some 2007 "thing" in them which slows down the running of macros even under Word 2003. If there was some tool out there that was able to convert these files to "genuine" 2003 files I could then use Word 2003 to run the macros at the old speed.
I don't think there is a problem in the macros themselves as they run correctly under either version of Word, only the speed is different - until the files have been touched by Word 2007, then they run slowly in either version of Word.
Obviously none of this is ideal, running the macros at full speed in Word 2007 would be a better solution!
Thanks again and any more ideas would be greatly appreciated.
Jeremy Gugenheim
Hi Jeremy
Have you tried opening the documents in Word 2003 then doing a full Save As to that file format? Or to WordProcessingML (Word's XML file format)? Either of those actions could toss out whatever (round-trip?) stuff Word 2007 is putting into the file...
But, as I said, unless you provide more information about the document, what it contains, and what the macro does, there aren't many suggestions anyone can make...
Cindy Meister, VSTO/Word MVP
Hi
I'm quite happy to post the macros, but they are huge. The Normal.dot file is 860KB. The chapter files are a bit difficult, I doubt if my client would appreciate it. The basic concept of some of the macros that are horribly slow is that they do a "For Each ch In oldDoc.Words" to work their way through every word of a document - which could be 200 pages long. The one that alerted me to the problem runs to some 800 lines of code. Now, I don't expect it to finish that by the time I've released the mouse button, but an hour??? Word 2003 ran them in about 5 minutes.
The documents contain mostly words, quite a few tables and a very few .WMF diagrams. All the text is styled, and is laid out in a very hierarchical Section/Subsection/Subsubsection/Bodytext fashion. Indeed the specific macro I mention above rattles through checking that the sections & subsections are numbered correctly.
I should perhaps add that I was an assembler/C/C++ programmer for 25 years, so I have some idea of how to write code. I'm definitely not trying to brag, just indicate a ballpark level of competence.
I'm ashamed to say that I only tried a Save, not a Save as, so I will try that. So much for 25 years of experience, eh?
Thank you very much for your reply, I will certainly try the Save As tip.
Jeremy
Hi Jeremy
I'll be looking forward to learning the results of "Save As" :-)
"Walking" a document, word-by-word would be slow, yes.
Moving forward to the new file formats it might make sense to look at processing the OpenXML file, rather than automating the Word application. Standard .NET Framework XML and Packaging namespaces can help you lay open the file and work directly with the internal document.xml. That would have to be loads faster in execution than even Word 2003.
Cindy Meister, VSTO/Word MVP
Hi Cindy,.
I suspect the XML route is the answer, but the project is running out of time this year so I think I will just have to thrum the desk with my fingers a lot for the next few weeks and then look at it all properly after publication. The only difficulty I can see with the XML method is that, because it would be a standalone program rather than built into Word, I could see my client smelling "magic" and assuming a rat. Also they may feel worried about ongoing support (what if I get run over by a bus etc.).
I'm a bit disappointed at what MS have done, I have scoured the web for speed info on Word 2010 but have seen no comment. Perhaps I should download the 30 day trial and run some tests.
Anyway, many many thanks for your input and your time, I have found the MVP site a life saver before and really appreciate all your efforts.
Jeremy
Hi Jeremy
<.>>
Mmm. That shouldn't happen. Perhaps this document is slightly corrupt? That could be part of the reason for the speed differences?
Cindy Meister, VSTO/Word MVP
Ah, well, there's the thing. These chapters are many years old and have never been restarted from scratch, always from the previous year's effort. There is a bug known to Microsoft concerning the display (or otherwise) of text condensed by less than 0.2 of a point in some of them and I am quite sure other bits of detritus as well.
We had sort of hoped that, moving to the new file format, some of these historical problems might go away!! Oh well...
I'm not convinced about the speed comment though, because if I go back to a chapter that has not been edited by 2007 (last year's chapters) then the macros run at normal speed under 2003.
I have now tried this under Windows 7 on my Dell and Windows XP in a Parallels VM on my Mac. The results are the same on both systems.
Jeremy
- Hi jgugenheim,
How is.
Hi,
I was very interested to find this thread. We too are experiencing a very similar problem. We have a large word document (Office 2003), and process it with a large macro to generate HTML (we also use the word document to create a pdf file) to use as help in our application. When we first generated this on Windows XP it took about 20 minutes to run. Then, for no apparent reason, it suddenly increased the time to 3 hours. There is a team of 4 developers, and on different machines it took either one or the other of those times (consistent for the machine). All these machines were using exactly the same files checked out of subversion. There was no rhyme or reason as to Office 2003 or 2007 in compatability mode, operating system being XP or Vista, 32 or 64 bit.
That situation continued for a year or so. Then, about a month ago, my XP machine suddenly decided to revert back to 20 minutes!! Now, I have got a fancy, high performance new Windows 7 machine with Office 2007. And the macro takes - wait for it ..... 10 hours!!!!
Like the discussions above our macro goes through the whole 350 page document in detail - and yes, we too are C++ programmers with lots of experience (and ability to get it wrong). We have no idea why this should be. An answer would really be welcome.
Regards,
Roy
- I've also picked up this thread. I have VBA code that generates text comments in a Word document. The code runs well using Word 2003 but becomes very pedestrian with Word 2007. Paragraphs that appear virtually instantaneously under Word 2003 appear with delays of about 1s between each segment of text. Stepping through the code does not help identify what is causing the problem. I suspect there are some background processes that are getting in the way but I've no idea what they might be. Automatic spelling and grammar checking is turned off by the code so it shouldn't be that. I'm at a loss and VERY frustrated.
Further to my comments above we have an interesting observation. In all my previous observations we had kept the word document open in full screen mode. However, we have now found that our ten-twelve hours run, on that same new Windows 7 machine, can be reduced to about 1 hour simply by not running in full screen mode and making the window size very small (so that the client window is less than 1 inch square - just enough to see what is happening). This suggests that we need to look at the screen updating.
Does anyone else see this effect?
- *Everybody* sees that effect, in every version of Word -- anything that modifies orscrolls the displayed document will slow down macro execution, often by an order ofmagnitude. There are several methods of minimizing the effect besides just making thewindow small:- If your macro uses the Selection.Find object, replace that with the .Find object of aRange variable to avoid scrolling the display. For example, if you have the typicalSelection.HomeKey Unit:=wdStoryWith Selection.Find' set up .Text, .Replacement, etc.While .Execute' do some stuffWendEnd Withreplace it withDim myRg As RangeSet myRg = ActiveDocument.RangeWith myRg.Find' set up .Text, .Replacement, etc.While .Execute' do some stuffWendEnd With- At the beginning of the macro, setApplication.ScreenUpdating = Falseand at the end of the macro set it back toApplication.ScreenUpdating = True- For extreme cases, try not displaying the Word screen at all() or calling the LockWindowUpdateAPI function ().
Jay Freedman
MS Word MVP FAQ:
- We have a very similar problem - a large text search/manipulation macro runs about 5 times more slowly under 2010. Re. the sudden reversion to the previous speed - this could be due to a conflicting set of macros or add-in that was installed and then deleted. Adobe Acrobat's PDFMaker function, for example, really slows things down (we now use NitroPDF which doesn't have the same problem). I'd never heard of the screen size issue, but it makes sense. I tried it on Word 2003, and a 155 sec run was reduced to 134 secs. I've asked a colleague to try it on his Word 2010. This has been going on since Word 2007, where I seem to recall that the macro execution speed was even worse than 2010. I don't understand why MicroSoft don't come clean on this one, and say what's happening. Or maybe they do, but it's buried somewhere deep in their support documents. John
I've just come back to this for the next revision of my client's book and the problem is still here on all my machines. The comments about screen updating are doubtless true, but will not be responsible for the delays that are being seen. I have always cut screen updates to a minimum and anyway we are all talking about a sudden change in speed of execution caused by the use of Word 2007.
My initial feeling was that MS had added a bunch of defensive code to stop macros misbehaving - a wise move. However, this would not explain the reduction in speed on a machine that had never seen an installation of Word 2007. The damage must be in the document file. And it must be damage. Short of hex dumping the documents and looking for differences between a file saved by 2003 and also by 2007 (and then divining what that difference actually means) we are at the mercy of MS who seem to be keeping their heads down.
I have not tried Word 2010, if anyone has any intel I'd be interested to hear it. If anyone knows of a way of customizing the ribbon on Word 2011 for the Mac then I'll gladly try it there as well -- actually I could download the trial and manually run a macro or two from the developer tab...
Thanks for all your input, it would be very handy if we could find a solution.
Jeremy
Actually I've just thought of a possible flaw in my last post.
I cannot remember if the machine that I ran the tests on that had never seen Word 2007 had been updated with all the latest Word 2003 patches. Given that I usually patch everything as soon as possible it is likely that it had been updated. If this was the case then it is possible that after Word 2007 was released, its macro engine was then published for Word 2003. This could explain why the macros run slow under Word 2003 as well now. EXCEPT that I do recall running a macro under Word 2003 at full speed, then running it under Word 2007 slowly ON THE SAME MACHINE and then running it slowly on Word 2003 ON THE SAME MACHINE again - this still indicates there is a change to the document that kicks off different macro behaviour.
Just thought I'd mention it.
Jeremy
This may be a result of crud accumulating in the actual compiled code that Word stores. If the compilers are different between Word versions, switching may slow things down.
What has worked for me in similar situations is to export the module to a .bas file, delete the module from the word doc or template where it is stored, and then import the .bas file.
This seems to clean up crud and speed things up.
- Jessica
Hello,
I am quite familiar withe the problem you describe, although, up to this day, I had not connected it back to a possible corruption of the Word file. It is now clear for me that it is the problem, and, fortunately, there is a solution that is even already installed in Word 2003!
I am currently using the French version of the program, so please bear with me and interpret my instructions with some flexibility.
Start Word 2003 and make a standard File - Open dialog. Select the file you want to work with and, instead of pressing the Open button, press on the small downward arrow to the right of it, which will make a drop-down menu appear, with the option "Open and repair". Select it and after a while (depending on the size of your file), you will have a newly scrubbed Word document.
For my Word files, which I am using to create the web pages of the e-learning web site i-structures.epfl.ch, the original speed of translation under Word 2003 (for a file I just tried, about 80 seconds) is recovered, compared with the 300-some seconds that I got earlier today before I followed through this thread.
Now, while I am at it, there is an observation I made while I was running my macro. I usually store my files on the University server, so I observed some network traffic while my macro was running (less than 25% for a 100 Mbits network, but nonetheless). I found it strange, and copied my file on my local disk. the Word template with the macros was already on the local disk. So, it should all run locally, no ? Well, with the "old" file, before I repaired it, the network activity was still there, although my macro was only reading and writing to the local disk. I don't know what is going on here, but I would suggest that some process is attempting to do something on the network that it should not be doing. This activity has disappeared after the repair. I should also note that I have a clean machine with an aggressive and up-to-date antivirus on it.
Hope this helps y'all,
Ollie
Hi Ollie
I'll try out your suggestion when I get a moment, but I have been through much of this before. See
One thing to note is that Word 2007 onward (which uses compressed/zipped html format files) takes about a second to open a file. If you are searching multiple files, this adds up.
My experience has been that Word 2007 onwards, using the html format, runs text manipulation macros 8 or 9 times more slowly, and even if you run a null macro it takes 8 or 9 times as long just to open a file in 2007/2010 as in 2003.
I have several thousand files in our research database, and I can't believe that they are all damaged. If the Open and repair method works, it is probably because Word is doing some rearranging of its own that resolves the problem. We do run the macros in a minimized window, which helps in all versions of Word.
We tried running Word 2010 with .doc files rather than .docm, and the problem persisted, I suspect because the .doc files had to be converted to an internal html format before Word 2010 could do anything with them.
If you have any further feedback since your last post, I'd be very interested to hear it.
Rgds
John Davidson
Hi Everyone
Something that seems to be getting missed (I read the link above with interest) is that once a file has been opened in Word 2007, then saved again as a Word 2003 .doc file, the macros run slow even when the file is opened in Word 2003. You can delete Word 2007 off the system, the macros still run slow. You can copy the file to a PC that has never had Word 2007 installed on it, the macros still run slow. So this can't be an XML problem because the 2003 .doc files use the old binary format.
Don't they?
Why can't someone from Microsoft give us an answer?? I have macros that are taking 50 minutes and more to run now that used to run in 5.
I note it's two days shy of two years since I started this, and we still have no answer... Microsoft - what are you doing???
Thank you to everyone who has input their tests and suggestions into this thread, it's all deeply appreciated. Maybe one day we'll get there?
Best
Jeremy Gugenheim
Hi John
<<One thing to note is that Word 2007 onward (which uses compressed/zipped html format files)>>
Actually, the file format of Word 2007 onwards is a Zip Package of XML files, not HTML. There's a very big difference in how things are processed internally between html and xml.
Not that this is necessarily relevant to the question at-hand, but it's an important thing to know when discussing Word files...
Although for many things I think the change in file format could explain why some things process more slowly. Compared to the older binary file format Word probably has to do a lot more processing and juggling to match things up across multiple XML files...
Cindy Meister, VSTO/Word MVP, my blog
- Proposed as answer by Ed Price - MSFTMicrosoft employee Thursday, November 08, 2012 6:30 PM
Hello Again
Just checking back in again after some time. Did anyone find a resolution to this issue? I now have a superfast 64-bit, i7, 16 GB RAM m/c, but still running Word 2003 because of the slow-running macros issue. But just a few days ago, Word 2003 started running slowly on a database that was largely the same as it always been. Like about 5-10 times as slow, just as before.
I wonder if there is some conflict between programs, as there is/was with Adobe PDFMaker and other macros (I've necer seen an explanation of that either!), or maybe some of Word's libraries, or something. I'm not using Word 2010, although there is a promotional stub of the thing on my computer, as purchased.
Watching progress and CPU usage using ProcessExplorer, I noted previously that maximum WINWORD.exe usage was around 12.5%, even when it was running the macros flat out. No doubt, the remainder of the time was spent reading-writing files. But now that things are running slowly, the CPU usage falls to 1.5 to 3%, which explains the fall off in speed. Boosting the priority of Word 2003 makes no difference. But the observation is interesting in that the slow-running-macros issue seems to be down to the amount of CPU resources being allocated to Word 2003. Or could it be that it is suddenly taking ages to open a file? Tests done with Word 2010 awhile back demonstrated that it was taking about 1 sec to open a file and run a null macro, which is significant when you have over a 1000 files.
Ho hum -- anyone got any bright ideas?
John
Had a further thought after reading this dialogue:
... which notes that Adobe AddIns are notorious for creating event handler conflicts. I wonder whether Word 2007 onwards uses any inbuilt addins that conflict with some ways of using regular VBA macros. I think that the CPU usage observation is also a fruitful line of enquiry.
John | https://social.msdn.microsoft.com/Forums/office/en-US/4a5d1400-33aa-4a0d-b30e-482f02e76f1b/macros-run-slow-on-word-2007?forum=worddev | CC-MAIN-2015-40 | en | refinedweb |
public class CamelModuleWithRouteTypes extends CamelModule
RoutesBuildertypes - which are then injected by Guice.
If you wish to bind all of the bound
RoutesBuilder implementations available - maybe with some filter applied - then
please use the
CamelModuleWithMatchingRoutes.
Or if you would like to specify exactly which
RoutesBuilder to bind then use the
CamelModule and create a provider
method annotated with @Provides and returning Set
public class MyModule extends CamelModule {
@Provides
Set<Routes> routes(Injector injector) { ... }
}
configure, configureCamelContext
bind, bind, bind, bindAnnotationInjector, bindAnnotationInjector, bindAnnotationInjector, bindInstance, bindMethodHandler, bindMethodHandler, bindMethodHandler, checkInjectedValueType, getParameterType
addError, addError, addError, bind, bind, bind, bindConstant, binder, bindInterceptor, bindListener, bindScope, configure, convertToTypes, currentStage, getMembersInjector, getMembersInjector, getProvider, getProvider, install, requestInjection, requestStaticInjection, requireBinding, requireBinding
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Apache Camel | http://camel.apache.org/maven/current/camel-guice/apidocs/org/apache/camel/guice/CamelModuleWithRouteTypes.html | CC-MAIN-2015-40 | en | refinedweb |
Blender 3D: Noob to Pro/Advanced Tutorials/Python Scripting/Import scripts
Importing objects into Blender is not that different from exporting. However, there are a few additional things to take care of. Firstly, all references to "export" in the header should be changed to "import". Secondly, instead of simply writing out data that Blender provides to us, we are responsible for giving data to Blender and ensuring that it is properly formatted. Although Blender is flexible, allowing us to ignore things like vertex indices, we do need to be careful that we do things in a sensible order.
Additionally, there is a bit of housekeeping to deal with. We should be in edit mode while modifying the mesh data. We also need to link up our newly created data to the scene, after it has been properly constructed, so that Blender can see it and maintain it. This makes it visible to the user, as well as ensuring that it gets saved along with the scene.
Importing a Mesh[edit]
Here is a simple script that can import an OBJ file created by the export script.
import Blender def import_obj(path): Blender.Window.WaitCursor(1) name = path.split('\\')[-1].split('/')[-1] mesh = Blender.NMesh.New( name ) # create a new mesh # parse the file file = open(path, 'r') for line in file: words = line.split() if len(words) == 0 or words[0].startswith('#'): pass elif words[0] == 'v': x, y, z = float(words[1]), float(words[2]), float(words[3]) mesh.verts.append(Blender.NMesh.Vert(x, y, z)) elif words[0] == 'f': faceVertList = [] for faceIdx in words[1:]: faceVert = mesh.verts[int(faceIdx)-1] faceVertList.append(faceVert) newFace = Blender.NMesh.Face(faceVertList) mesh.addFace(newFace) # link the mesh to a new object ob = Blender.Object.New('Mesh', name) # Mesh must be spelled just this--it is a specific type ob.link(mesh) # tell the object to use the mesh we just made scn = Blender.Scene.GetCurrent() for o in scn.getChildren(): o.sel = 0 scn.link(ob) # link the object to the current scene ob.sel= 1 ob.Layers = scn.Layers Blender.Window.WaitCursor(0) Blender.Window.RedrawAll() Blender.Window.FileSelector(import_obj, 'Import')
This will load an OBJ file into Blender, creating a new mesh object. Let's take a look at the more interesting portions.
Blender.Window.WaitCursor(1)
Turn on the wait cursor so the user knows the computer is importing.
name = path.split('\\')[-1].split('/')[-1] mesh = Blender.NMesh.New( name ) # create a new mesh
Here, we create a new mesh datablock. The name is made from the path only with the filename.
ob = Blender.Object.New('Mesh', name) ob.link(mesh)
Next, we create a new object and link it to the mesh. This instantiates the mesh.
scn = Blender.Scene.GetCurrent() scn.link(ob) # link the object to the current scene ob.sel= 1 ob.Layers = scn.Layers
Finally, we attach the new object to the current scene, making it accessible to the user and ensuring that it will be saved along with the scene. We also select the new object so that the user can easily modify it after import. Copying the scenes layers ensures that the object will occupy the scenes current view layers.
Blender.Window.WaitCursor(0) Blender.Window.RedrawAll()
Now the finishing touches. We turn off the wait cursor. We also redraw the 3D window to ensure that the new object is initially visible. If we didn't do this, the object might not appear until the user changes the viewpoint or forces a redraw in some other way. | https://en.wikibooks.org/wiki/Blender_3D:_Noob_to_Pro/Advanced_Tutorials/Python_Scripting/Import_scripts | CC-MAIN-2015-40 | en | refinedweb |
using @Produces and @Inject across AS7 module dependenciesRalf Sigmund Mar 1, 2012 11:26 AM
Hi,
is it possible to use a CDI producer method defined in module A in order to CDI inject into a bean in a second module B?
Is there any description on the relation between CDI and the JBoss Modules System?
In producer.jar:
import javax.enterprise.inject.Produces; import javax.enterprise.inject.spi.InjectionPoint; import java.util.logging.Logger; public class Producer { @Produces public static Logger produceLog(InjectionPoint injectionPoint) { return Logger.getLogger(injectionPoint.getMember().getDeclaringClass().getName()); } }
In consumer.war:
import javax.inject.Inject; import java.util.logging.Logger; public class Consumer { @Inject Logger logger; public void doLog() { logger.info("Hello CDI with JBoss Modules"); } }
module B has a Manifest Dependency on module A:
Manifest-Version: 1.0 Dependencies: deployment.producer.jar
this approach leads to an weld unsatisfied dependency problem:
"JBAS014671: Failed services" => {"jboss.deployment.unit.\"consumer.war\".WeldService" => "org.jboss.msc.service.StartException in service jboss.deployment.unit.\"consumer.war\".WeldService: org.jboss.weld.exceptions.DeploymentException: WELD-001408 Unsatisfied dependencies for type [Logger] with qualifiers [@Default] at injection point [[field] @Inject question.Consumer.logger]"
I posted a sample project on Github:
TIA
Ralf
1. Re: using @Produces and @Inject across AS7 module dependenciesabhi0123 Apr 13, 2015 2:39 AM (in response to Ralf Sigmund)
I know this is 3 years later but I've the exact same issue. Did you ever find a way to do this?
Weld cannot find producer method in library jar
2. Re: using @Produces and @Inject across AS7 module dependenciesMartin Kouba Apr 13, 2015 3:38 AM (in response to abhi0123)
3. Re: using @Produces and @Inject across AS7 module dependenciesMartin Kouba Apr 13, 2015 4:02 AM (in response to Ralf Sigmund)
Is there any description on the relation between CDI and the JBoss Modules System?
ralf.sigmund The contract is not clearly defined (CDI spec only specifies intra-applicaion injection). However, from WildFly 8 onwards, it's possible to add a dependency on the external deployment and have classes inside this deployment picked up as CDI beans - see also. Moreover, there are other ways to implement inter-application injection - see for example this WildFly quickstart: quickstart/inter-app at master · wildfly/quickstart · GitHub.
4. Re: using @Produces and @Inject across AS7 module dependenciesJozef Hartinger Apr 14, 2015 2:13 AM (in response to Ralf Sigmund)
Note that the built-in modules are not CDI applications on their own. Instead, when a CDI application references these modules, they are treated as bean archives of the application. As for visibility, on WildFly 8 an better the visibility is determined by the (JBoss Modules) classloader visibility. That means that a bean in module A can see a producer in module B as long as the producer is accessible from A's classloader. Therefore, you only need to set up accessibility once - in the module.xml file.
In addition to this, each referenced CDI-enabled built-in module can access beans from the top-level module of a deployment. | https://community.jboss.org/thread/196201 | CC-MAIN-2015-40 | en | refinedweb |
Template:Subjects/doc
This template categorizes a book in up to 10 subject categories, plus the book category (this is automatic). The book category can be modified by setting the
bookcategory variable. You can also use this on subpages of a book and it will only add the book category; however, {{BookCat}} is recommended for subpages.
For non-subject categories (typically, descendants of Category:Books by type), use template {{categories}} instead of this template.
Usage[edit]
{{subjects|mathematics|science|etc}}
{{subjects|bookcategory={{FULLBOOKNAME}} (book)|mathematics|science|etc}}
Internals[edit]
This template is responsible for adding books to the hidden allbooks categories, which allow automatic generation of lists of books belonging to a given subject or any of its sub-subjects. When this template detects that it is being used on the main page of a book, for each subject specified it calls {{subjects/leaf}}. This is why this template should only be used for subject categories: {{subjects/leaf}} uses subject pages to figure out which allbooks categories to add the book to, and if there are no subject pages to consult, it reports the problem to Category:Attention needed (allbooks). By using {{categories}} for non-subject categories, one avoids nuisance reports to Category:Attention needed (allbooks).
This template can be induced to display diagnostic messages, explaining why it has put the page in Category:Attention needed (allbooks), by specifying parameter
diagnose=true.
See also[edit]
- {{categories}} — adds book to non-subject categories
- {{alphabetical}} — adds book to alphabetical classification
- {{status}} — classifies book by completion status
- {{FULLBOOKNAME}} — Returns the name of the book including the namespace | https://en.wikibooks.org/wiki/Template:Subjects/doc | CC-MAIN-2015-40 | en | refinedweb |
Anyway, I usually did that by setting a flag so that the second time the function was called, it would check that flag and skip the function body.
After tinkering with it a bit, I came up with a shorter version that changes the function's code object, which I thought was pretty nice:
def func():
    """Print a message, then disable itself.

    A self-disabling function: the first call prints the message and
    replaces this function's own code object with that of an empty
    lambda, so every later call is a no-op.
    """
    print('Calling func only this time')
    # Python 3 spells the attribute __code__ (it was func_code in Python 2).
    func.__code__ = (lambda: None).__code__
Now, calling it a second time will actually execute the lamda:None code.
Yes, I know you'll say it's cryptic, so, I'm including a decorator version below which does the same checking with a variable:
import functools


def call_only_once(func):
    """Decorator: execute *func* on the first call only.

    Later calls return None without invoking *func*.  The flag is set in
    a finally block so the function is not retried even if it raises.
    functools.wraps preserves func's name and docstring on the wrapper.
    """
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        if not new_func._called:
            try:
                return func(*args, **kwargs)
            finally:
                new_func._called = True
    new_func._called = False
    return new_func


# The decorator must be defined before it is applied (the original
# snippet used it above its own definition, which raises NameError).
@call_only_once
def func():
    print('Calling func only this time')
11 comments:
Actually, I would use the version that sets a func.flag, but implemented as a decorator. Once you got rid of the repeated boilerplate, you don't need to make things harder for the readers that don't know about func_code.
When doing decorators you should also always use functools.wraps which ensures the docstring is copied and similar issues
For more fun, and in order to improve the debugging skills of your co-workers, replace
(lambda:None)
with
random.choice([ob for ob in globals().values() if inspect.isfunction(ob)])
Hi Adriano, I agree with you (so, in my real use cases I used the version that sets a flag and not the one with the func_code).
Still, I posted it because I believe knowing about func_code has its own uses -- such as providing a different version after a function is called the first time -- maybe to lazy evaluate requisites or some other not so common use-cases :)
Hi Anonymous :)
Yes, functools.wraps should definitely be used (thanks for the reminder).
And Anonymous 2: I agree, that'd be much more fun :)
it's actually not "call once", it's "call many, execute once"
Isn't this also called a "Final"?
looks like in python 3 this would be:
func.__code__ = (lambda:None).__code__
Is that right, or is there a prettier way in 3.x?
Anonymous: I'm not sure if this is called a 'final' (final for me is when you can't reassign something).
arrg: Yes, this code is python 2.x. For python 3 you'd have to set __code__.
Regarding the decorator example, why bother with try/finally? I'd assign the _called just before executing the function itself. What do you think?
Hi Ron,
I guess that'd be Ok too :) | http://pydev.blogspot.com/2012/12/python-tricks-making-sure-function-is.html?showComment=1354963787901 | CC-MAIN-2015-40 | en | refinedweb |
Introduction to JBoss Seam
1) Introduction
Yet another Web Application Framework! This time it is from JBoss Community. JBoss provides a new Web Application Framework called “JBoss Seam” which combines the advantages from the two rapidly growing technologies Enterprise Java Beans 3.0 and Java Server Faces. JBoss Seam, by sitting on top of J2EE provides a nice way of integration between JSF and EJB Components with other great functionalities. This article is an introductory article only and it covers the idea that gave birth to JBoss Seam, its advantages, the various modules involved along with a Sample Application. This article assumes the readers to have some bit of knowledge and programming in areas like Java Server Faces and Enterprise Java Beans 3.0. For more information about these technologies, visit JSF and EJB 3.0.
2) The Need for JBoss Seam
Let us exactly define what JBoss seam is. JBoss Seam provides a Light-weight Container for J2EE standards and it addresses the long-standing issues in any Typical Web Application like State Management and Better Browser Navigation. It also neatly provides an integration between the two popular technologies, Java Server Faces (in the UI tier) and Enterprise Java Beans (EJB 3 in the Server Side). Before getting into the various details about JBoss Seam let us see the common set of problems that are being faced in a Development and the Usability of a typical Web Application using Java Server Faces and Enterprise Java Beans.
For this, let us assume an imaginary application. Let us keep the requirements of the imaginary Web Application we are going to consider very small. The Web Application is a simple Registration Application, which provides the user with a View which contains username and password text-fields. User can click the submit button after filling both the fields. If the username password information given by the user is not found in the database, the user is assumed to be a new user and he is greeted with a welcome message; else an error page is displayed with appropriate message.
Let us analyze the roles of JSF and EJB 3.0 in this Web Application. More specifically, we will analyze the various components in both the client and the server tier along with their responsibilities.
In the Client side, for designing and presenting the form with the input controls (text-field and button) to the user, we may have written a userinput.jsp page with the set of JSF core tag libraries like <f:view> and <h:form>. And then, a JSF Managed Bean called UserBean encapsulating the properties (username and password) may have been coded, which serves as a Model. The UI component values within the JSP page would have been bound to the properties of the Managed Bean with the help of the JSF Expression Language. Since the logic is to query the database for the existence of the username and the password, a Stateless Session Facade Bean would have been written whose sole purpose is to persist the client information to the database. For persisting the information, the Session Bean may depend on the EntityManager API for querying and persisting entities.
The Data Traversal Logic from JSF to the EJB Session Bean would have been taken care of by the Managed Bean only. The Managed Bean, apart from representing a Model, may also act as a Listener in handling the button click events. Say, after clicking the register button, one of the action methods within the Managed Bean would have been called by the framework, and here the Bean might perform a JNDI look-up to get an instance of the Session Bean for persisting or querying the user information. If we look carefully, the JSF Managed Bean is serving as an intermediary in the transfer of Data from the Client to the Server. Within this Managed Bean is the code for getting a reference to the Session Bean for doing various other functionalities. Wouldn't a direct communication between JSF UI Components and the Enterprise Bean Components be nice? There is no purpose for the intermediate Managed Bean in this case. JBoss Seam provides a very good solution for this. Not only this, many of the outstanding problems that are faced in a Web Application are also addressed and given solutions in this Framework.
3) Advantages of JBoss Seam
Following are the major advantages that a Web Application may enjoy if it uses JBoss Seam Framework. They are
- Integration of JSF and EJB
- Stateful Web Applications
- Dependency Bijection Support
Let us look into the various advantages of JBoss Seam in the subsequent sections.
3.1) Integration of JSF and EJB
Today the Web Application World see more and more matured technologies that are focusing to establish an easy-to-use development by reducing lots and lots of boiler-plate code along with some other added functionalities in their own domains. Let us consider JSF and EJB technologies to extend further discussion regarding this.
EJB 3.0 which is a specification given from Sun has gained much popularity because of its simplified yet robust programming model. Much of the middle-ware related services like Security, Transactions, Connection Pooling etc is delegated to the container itself. Comparing to its predecessors, EJB 3.0 offers a POJO programming Model. No need for your beans to extend or implement EJB specific classes or interfaces. And also, along with the new specification Java Persistence API (JPA), an unified programming model to access the underlying database along with rich set of features are now possible thereby completely eliminating the heavy-headed entity beans.
What is JPA?.
Java Server Faces provides a Component-based approach for developing User Interface Components in a Web Application. It hides most of the boiler-plate code and provides a higher-level abstraction over the client request and the server response objects. Using Java Server Faces, a Web Application can be viewed by components making events and listeners executing the appropriate logic for the Events. No need for the traditional HttpServletRequest and HttpServletResponse object to extract the client input parameters and to generate the response.
JBoss provides a framework in which the Events that are emitted by the JSF UI Components can be directly handled by the Enterprise Beans. No need for the intermediate Managed Beans which establishes the data transfer from and to JSF and EJB.
3.2) Dependency Bijection Support
Before getting into Dependency Bijection, it is wise to look at the two types: namely Dependency Injection and Dependency Outjection. These two are the popular patterns and modern Frameworks and Containers makes use of this abundantly. Let us see these two techniques
3.2.1) Dependency Injection
This model is used when a Component or a Service which is running inside some Framework or a Container is well known in the early stages so that the Framework/Container can create instances of them thereby taking the burden from the Clients. These type of model is used heavily in most of the J2EE Components, to name a few, EJB, Servlets, JMS etc.
For example, consider the following piece of code,
MySessionBean.java
@Stateless public class MySessionBean{ @PersistentContext private EntityManager entityManager; public void query(){ // Do something with EntityManager reference. } }
In the above code, the Session Bean (
MySessionBean) is depending on the services of
EntityManager. So, before the session bean starts using the
EntityManager object, the
EntityManager instance should be available. This is made simple because the EJB Container will creates a new instance of the
EntityManager object based on the various parameters taken from the Configuration File and injects this instance to the
EntityManager reference. This way of doing is called Dependency Injection.
In Seam, injection of a Component by the Seam framework is done by the use of
@In Annotation. For example, consider the following piece of code,
MyComponent.java
// Seam component whose dependency is supplied by the container: the
// @In annotation asks Seam to inject a MyInjectorComponent instance
// after this component is created and before any method is invoked.
public class MyComponent{
    @In
    private MyInjectorComponent compToBeInjected;
}
When the Application gets deployed, Seam Framework will keep track of these Annotated Components and during run-time, upon creation of
MyComponent class and before the invocation of any other methods, the Container will inject and instance of an object of type
MyInjectorComponent to the
compToBeInjected reference.
3.2.2) Dependency Outjection
In Dependency Injection, usually the Framework injects the services to components that are in need of, whereas the reverse happens in Dependency Outjection. Consider the following piece of code,
MyComponent.java
public class MyComponent{ @Out private MyService myService; public MyComponent(){ } public MyService getMyService(){ return createMyService(); } private MyService createMyService(){ // My own way of creating } }
MyServiceClient.java
// Client component: the container injects here (@In) the MyService
// instance it previously took from the producing component.
public class MyServiceClient{
    @In
    private MyService myService;
}
In the above code,
MyComponent class is acting as a factory class for creating
MyService objects. Assume that this
MyComponent class is a controlled class meaning that is being managed by the Container or the Framework wherever it is embedded. Situations may arise such that the
MyService object might be needed by the Container so that the Container can use this
MyService to inject dependencies on other components or clients. For instance,
MyServiceClient is in need of
MyService object and the Container can inject this reference from the object that it has taken from MyComponent class. This model is Dependency Outjection where a Service is taken by the Container from the Component.
Seam supports both these models in the form of
@In and
@Out annotations. For example consider the following class,
MyClass.java
public class MyClass {

    // Injected by the container before use (@In = injection).
    @In
    private MyServiceOne service1;

    // Taken ("outjected") by the container so it can be served to other
    // components.  Java annotations are case-sensitive: the correct
    // spelling is @Out, not the original snippet's "@out".
    @Out
    private MyServiceTwo service2;
}
The above code can be read like this. The container injects the reference to service1 because it is annotated with
@In (meaning for Injection). And at a later stage, the Container may taken the reference of service2 (because of
@Out) thereby serving it to other needful Components.
3.3) Stateful Web Applications
Since the underlying protocol used in a Web Application is Http, all Web Application are Stateless in nature. Precisely it means that all the requests that are coming from the Client Browser are treated as individual requests only. The Server shows no partiality for the Client Requests. It is up to the Application or the Framework to identify whether requests are coming from the same Client or not. Session Management in Web Application is a time-consuming job and typically Servlets/Jsp provides various ways to manage sessions. Even in this case, Application Developers still have to depend on HttpSession like classes for creating session objects and storing/retrieving objects from the session.
JBoss Seam provides an excellent way to Manage States amongst multiple client requests. The State Management Facility is tightly integrated with Seam Components in the form of various Contexts. Following are the most commonly used Contexts in Seam.
- Conversation – A Conversation generally represents multiple requests that come from the same client. Components that come under this category can remember the state of the client and this forms the heart of State Management in Seam.
- Application – This context generally holds information that is used by various components within the same Application.
- Session – Components that fall under this category simply uses Http Session Api for state management.
- Page – The scope of the components managing the information is restricted to the Current Page only. All the stored information is lost when the user leaves this current page.
- Stateless – Components that falls under this category don’t manage state between multiple Client Requests.
4) Seam Components
Components in Seam take over the functionality as well the Business Logic in a Web Application. The good thing is that all Seam Components are POJO Objects only meaning that Components don’t have to extend or implement any Seam specific Classes or Interfaces. The Seam Component Model follows the standard Java Bean Component Architecture. Even it is possible to Annotate Enterprise Beans (Session Beans, Entity Beans and Message Driven Beans) as Components. Seam provides an excellent framework for Integrating Enterprise Beans into the Seam Framework.
As mentioned before, Components in Seam carries the business process involved in a typical Web Application. Seam Components may also act as Listeners for JSF UI Components or they may even interact with the Database. Since Seam provides a good deal of Integration with EJB Components, Developers generally prefer having Stateful Session Beans as Listeners and the Entity Beans for interacting with the Database.
Usually Seam Components are identified by Name, Scope and Role. These are Declarative Identifications meaning that they are represented directly in the Source Code with the help of Annotations. For example, consider the following Seam Component,
MyStatefulBeanImpl.java
@Stateful @Name("MyStatefulBean") @Scope(ScopeType.SESSION) public MyStatefulBeanImpl implements MyStatefulBean{ public String statefulMethod(){ } }
In the above code, the
@Name Annotation represents the name of a Seam Component using which other Managed Seam Components can access it.
@Scope defines how life this Component will survive during the Web Application. Since the above class is actually a Session Bean, it has been marked with
@Stateful Annotation.
5) Support for Annotations
Most of the boiler-plate needed for the various parts of the Application is taken care by the Seam because of the declarative style of programming available in the form of Annotations. Most of the Annotations that we see in Seam are a mix of EJB 3.0 and Seam specific Annotations. They are a number of Annotations defined in the Seam Api and this section just provides an overview of the most commonly used Annotations. For a complete list of Annotations and their purpose, have a look at the Seam Documentation.
5.1) Annotations for Seam Components
Components in Seam are really POJO’s and they play a major role in keeping the Application stateful. Following are the most commonly used Annotations related to Seam Components are given below.
- @Name
- @Scope
- @JndiName
5.1.1) @Name Annotation
This Annotation specifies the name of a Seam Component so that other Components (Seam Components or JSF Pages) can refer the Component by its name. This Annotation is mandatory for a class that is going to act as a Seam Component. Following is an example of the Seam Component.
MyComponent.java
// Minimal Seam component: @Name registers it under "MyComponent" so
// other Seam components and JSF pages can refer to it by that name.
@Name("MyComponent")
public class MyComponent{
    // Some Functionalities here.
}
5.1.2) @Scope Annotation
This Annotation, if given, tells to the Seam Framework about the scope (or the life-time) for a Seam Component. The values for this Annotation are taken from the
org.jboss.seam.ScopeType Enum and can be any of the following values:
APPLICATION,
CONVERSATION,
SESSION,
PAGE,
STATELESS etc. Following is a sample code snippet for using the
@Scope Annotation.
MyComponent.java
// Session-scoped Seam component: one instance lives for the duration
// of the HTTP session (scope values come from the ScopeType enum).
@Scope(ScopeType.SESSION)
@Name("MyComponent")
public class MyComponent{
    // Some Functionalities here.
}
5.1.3) @JndiName Annotation
This Annotation should not be applied to normal Seam Components but to Components that represent any of EJB or JMS Service. Following is an example of one such component.
MySessionBean.java
@JndiName("ejb/session/MySessionBean") Public class MySessionBean{ }
5.2) Annotations for Components Lifecycle
All Components in Seam have well defined Life-cycle Management as defined by the Seam Framework. Management of Component Life-cycle is important as it has dependencies over the state Management of Web Applications. Following are the most commonly used Annotations for Life-cycle Management.
- @Create
- @Destroy
5.2.1) @Create Annotation
When a method inside a Seam Component is marked with
@Create Annotation, then this method will be called immediately after the creation of Seam Component. Most of the costly Resources like Initializing File or Database can be coded in this method. Following is the sample code snippet for the same,
MyDatabaseUtils.java
@Name("MyDatabaseUtils") Public class MyDatabaseUtils{ @Create() public void initDatabase(){ } }
5.2.2) @Destroy Annotation
If
@Create Annotation for a Seam Component deals with initialization Stuffs for a object, then the code inside the
@Destroy Annotation can be used to Release Resources or References to other objects. This method will be called before a Component is going to get removed from any of the Context as mentioned by its Scope.
MyDatabaseUtils.java
@Name("MyDatabaseUtils") Public class MyDatabaseUtils{ @Destroy() public void closeDbResources(){ } }
6) Events
Events can occur in a Seam Application through various Sources. And the events emitted by the Sources are captured and handled by the Listeners. Let us look into the various Sources that are responsible for emitting Events. Source can be any one of the following items.
- Seam Page Events
- Seam Component Driven Events
- JSF Components Emitting Events
Let us look into the details one by one.
6.1) Seam Page Events
Before Seam renders a page to Display, it checks whether the page has registered itself for firing any Events. If that’s the case, then the Framework will fire Events on behalf of the Web Pages. For example, consider the following situation. If the Home Page of a Web-site is eager to display the hits to this page, then the following could be possible,
pages.xml
<pages>
    <page view-id="/hit-count.jsp" action="#{Hitter.initHits}"/>
</pages>
The above is the
pages.xml file and it should be placed in the
WEB-INF directory of the Web Application module. The above code essentially tells that whenever a request comes for
/hit-count.jsp, then call the method
initHits() available in the
Hitter class.
6.2) Seam Component Driven Events
It is possible by the Seam Components also to emit Events. There is a Low-degree of Coupling between the Initiation and the Handling of Events. Even Custom Events can be declared specified either in the Xml File or through Annotations. For example, consider the following code snippet,
components.xml
<components> <event type = "myEvent"> <action expression = "#{MyComponent.myCallBack1}"/> <action expression = "#{MyComponent.myCallBack2}"/> </event> </components>
The above Xml File must be placed in the
WEB-INF directory of the Web Application Module. If anyone raises an Event of type (
'myEvent' is just a string), then the following methods defined inside the action element will be fired. For, example, consider the following code,
MyEventInitiator.java
// Raises the custom Seam event "myEvent"; Seam then invokes every
// action registered for that event type in components.xml.
public class MyEventInitiator{
    public void initEvent(){
        Events.instance().raiseEvent("myEvent");
    }
}
MyComponent.java
// Listener component: both callbacks are bound to the "myEvent" event
// type in components.xml and fire in the order declared there.
// FIX: the original declaration was missing the "class" keyword.
public class MyComponent {

    public void myCallBack1() {
        System.out.println("My Call back method 1 called");
    }

    public void myCallBack2() {
        System.out.println("My Call back method 2 called");
    }
}
If some one initiates the Event by calling
MyEventInitiator.initEvent(), then a event of type myEvent gets raised. Since this Listeners (Actions) of this Event are defined in the components.xml file in the form of
MyComponent.myCallBack1() and
MyComponent.myCallback2(), both these methods will be fired in the order in which they are defined.
6.3) JSF Components Emitting Events
As discussed previously, Seam components can act as Listeners for the JSF UI Components emitting Events. For example, consider the following piece of Code,
calculate.jsp
<h:commandButton value="Calculate Tax" action="#{TaxCalculator.calculateTax}"/>
In the above snippet code, whenever the User clicks the Calculate Tax button, the
calculateTax() method defined inside the
TaxCalculator class will be called. Following is how the
TaxCalculator component looks like,
TaxCalculator.java
// Page-scoped Seam component acting as the JSF action listener for the
// "Calculate Tax" button; its state is discarded when the user leaves
// the current page.
@Name("TaxCalculator")
@Scope(ScopeType.PAGE)
public class TaxCalculator{
    public void calculateTax(){
    }
}
7) Sample Application
With the concepts and theories in mind, let us develop a minimal functionality Shopping Application using JBoss Seam Framework. The following are the list of softwares/products needed for the Sample Application to work.
The functionality of the Shopping Application is simple. It initially provides a page prompting for the user name before beginning the Shopping Application. Then a List of products along with Product Id, Name, Description, Price, Number of Items to be checked-out are shown to the user. The user can select the product items he wants and continues to check-out. After that a brief summary of the Product Information is shown to the User .
Since the Shopping Application it is an Enterprise application, it involves the Creation of Jar and War files and then packaging them as an Ear File. Let us look into the various files and the directory structure required for completing this Application.
7.1) View Files
These set of files represents the view files rendered as a result of client making the client. Let us look into the various files involved one by one.
7.1.1) index.html
index.html
<html>
    <head>
        <meta http-equiv="Refresh" content="0; URL=user.seam"/>
    </head>
</html>
This is the file that will be request when the client starts the Application in the Web Browser. This file immediately redirects to
user.seam. Later, we will see that the extension seam is mapped to xhtml (for Facelets). Here is the content of
user.xhtml.
7.1.2) user.xhtml
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
      xmlns:h="http://java.sun.com/jsf/html">
<body>
<center>
    <h2>Welcome to Shopping.</h2>
</center>
<h:form>
    <center>
        To continue shopping, please enter your name: <br/>
        <h:inputText value="#{User.name}"/><br/>
        <h:commandButton value="Continue Shopping" action="product-list"/>
    </center>
</h:form>
</body>
</html>
The things to note that is there is a Component called
User with a property called
name. As mentioned in the preceding sections, the user will be prompted to enter his name before continue with the shopping. The entered name will be mapped to the name property in the User class. Also note that when the clicks the “Continue Shopping” button, the action is re-directed to “product-list” which actually maps to
product-list.xhtml (which is defined the
navigation.xml) file. Given below are the contents of
product-list.xhtml file.
7.1.3) product-list.xhtml
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
      xmlns:h="http://java.sun.com/jsf/html"
      xmlns:f="http://java.sun.com/jsf/core">
<body>
<center>
    <h2>List of available products</h2>
    Hi, <b>#{User.name}</b>
    Following are the available products in our shopping repository
</center>
<h:form>
    <center>
        <h:dataTable value="#{ShoppingManager.products}" var="product">
            <h:column>
                <f:facet name="header">Name</f:facet>
                <h:outputText value="#{product.name}"/>
            </h:column>
            <h:column>
                <f:facet name="header">Description</f:facet>
                <h:outputText value="#{product.description}"/>
            </h:column>
            <h:column>
                <f:facet name="header">Price</f:facet>
                <h:outputText value="#{product.price}"/>
            </h:column>
            <h:column>
                <f:facet name="header">No. of items</f:facet>
                <h:inputText value="#{product.noOfItems}"/>
            </h:column>
        </h:dataTable>
    </center>
    <center>
        <h:commandButton value="Home" action="user"/>
        <h:commandButton value="Checkout Products" action="#{ShoppingManager.checkout}"/>
    </center>
</h:form>
</body>
</html>
Note that a class called Product with properties name, description, price, noOfItems, amount, id (Primary key) is defined which represent of the products in the shopping repository. Access to the product objects such as displaying is facilitated with the help of ShoppingManager Session Bean. Note that this Session Bean is configured to act as a Seam Component in this Application. As soon as this page loads, all the Product information from the Products Table, is fetched and it is displayed in this page.
We have two buttons at the bottom of the page. One is to navigate to the Home page and the other is to checkout the products. As soon as the user clicks the “Checkout Products” button, the
ShoppingManager bean is fired which will calculate the amount of the individual products the user has selected. Upon completion of the method, checkout action is returned which is mapped with
product-summary.xhtml page as defined in the
navigation.xml file.
7.1.4) product-summary.xhtml
Given below are the contents of
product-summary.xhtml file which will display the summary information of the user selected products along with the total price information.
product-summary.xhtml
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
      xmlns:h="http://java.sun.com/jsf/html"
      xmlns:f="http://java.sun.com/jsf/core">
<body>
<center>
    <h2>Shopping Summary</h2>
    Hi, <b>#{User.name}</b>
</center>
<h:form>
    <center>
        <h:dataTable value="#{ShoppingManager.products}" var="product">
            <h:column>
                <f:facet name="header">Name</f:facet>
                <h:outputText value="#{product.name}"/>
            </h:column>
            <h:column>
                <f:facet name="header">No. of items</f:facet>
                <h:outputText value="#{product.noOfItems}"/>
            </h:column>
            <h:column>
                <f:facet name="header">Amount</f:facet>
                <h:outputText value="#{product.amount}"/>
            </h:column>
        </h:dataTable>
    </center>
    <center>
        Total Price: <b><h:outputText value="#{ShoppingManager.totalPrice}"/></b>
        Happy Shopping.
        <h:commandButton value="Home" action="user"/>
    </center>
</h:form>
</body>
</html>
7.2) Resource Files
In Seam terms, all the Configuration Information (
web.xml,
application.xml) and the various Resources Messages are defined in terms of resources. Let us have a look into the various Configuration file.
7.2.1) application.xml
application.xml
<application> <display-name>Seam Hello World</display-name> <module> <web> <web-uri>app.war</web-uri> <context-root>/Shopping</context-root> </web> </module> <module> <ejb>app.jar</ejb> </module> <module> <java>jboss-seam.jar</java> </module> <module> <java>el-ri.jar</java> </module> <module> <java>el-api.jar</java> </module> </application>
This is the Application Level Configuration File which contains entries for the Web and the Java Module along with the libraries that are specific to JBoss and Seam.
7.2.2) ejb-jar.xml
ejb-jar.xml
<ejb-jar> <assembly-descriptor> <interceptor-binding> <ejb-name>*</ejb-name> <interceptor-class>org.jboss.seam.ejb.SeamInterceptor</interceptor-class> </interceptor-binding> </assembly-descriptor> <interceptors> <interceptor> <interceptor-class>org.jboss.seam.ejb.SeamInterceptor</interceptor-class> </interceptor> </interceptors> </ejb-jar>
These entries are necessary to make all the Ejb Components look like Seam Components. This is achieved by intercepting the functionality of the Ejb Components with the help of Interceptors.
7.2.3) persistence.xml
persistence.xml
<persistence> <persistence-unit <provider>org.hibernate.ejb.HibernatePersistence</provider> <jta-data-source>java:/DefaultDS</jta-data-source> <properties> <property name="hibernate.dialect" value="org.hibernate.dialect.HSQLDialect"/> <property name="hibernate.transaction.flush_before_completion" value="true"/> <property name="hibernate.hbm2ddl.auto" value="create-drop"/> <property name="hibernate.show_sql" value="true"/> </properties> </persistence-unit> </persistence>
The above file represents the Persistence Unit that the Session Bean may use to store and retrieve information about the product Objects from the Product table. A persistence Unit generally contains information about its Database, Driver Name, Dialect etc.
7.2.4) components.xml
Since we are using Session Bean for interacting with the Entity Object, we must specify the Jndi Name of the Session Bean so that Seam framework can lookup and create an instance of the Session Bean. This is achieved with the help of the following Xml File.
components.xml
<?xml version="1.0" encoding="UTF-8"?>
<components xmlns="http://jboss.com/products/seam/components"
            xmlns:core="http://jboss.com/products/seam/core">
    <core:init jndi-pattern="Shopping/#{ejbName}/local"/>
    <core:manager conversation-timeout="120000"/>
</components>
7.2.5) navigation.xml
All the navigation rules in this Shopping Application are logically expressed in terms of action in this
navigation.xml file. The advantage of providing this kind of declarative mapping is that all the components will refer only to the Logical Outcome of the Action and not the Physical Location of the Page.
navigation.xml
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE faces-config PUBLIC "-//Sun Microsystems, Inc.//DTD JavaServer Faces Config 1.0//EN" ""> <faces-config> <navigation-rule> <from-view-id>*</from-view-id> <navigation-case> <from-outcome>user</from-outcome> <to-view-id>/user.xhtml</to-view-id> <redirect/> </navigation-case> <navigation-case> <from-outcome>product-list</from-outcome> <to-view-id>/product-list.xhtml</to-view-id> <redirect/> </navigation-case> <navigation-case> <from-outcome>checkout</from-outcome> <to-view-id>/product-summary.xhtml</to-view-id> <redirect/> </navigation-case> </navigation-rule> </faces-config>
7.3) Source Files
Now let us look into the Various Java Source Files (representing Session Beans, Entities, etc). Following is the code for the Product Entity.
7.3.1) Product.java
Product.java
package net.javabeat.articles.jboss.seam.shopping;

import org.jboss.seam.annotations.*;
import javax.persistence.*;
import java.io.Serializable;
import static org.jboss.seam.ScopeType.SESSION;

/**
 * JPA entity representing one product in the shopping repository,
 * doubling as a session-scoped Seam component.  Implements
 * Serializable so instances survive passivation of the HTTP session.
 */
@Entity
@Name("product") // FIX: was @Name("person"), a copy/paste slip -- this entity models a product
@Scope(SESSION)
public class Product implements Serializable {

    private String name;         // display name
    private String description;  // short description shown in the product list
    private double price;        // unit price
    private int noOfItems;       // quantity selected by the user
    private double amount;       // noOfItems * price, filled in at checkout
    private long id;             // generated primary key

    public Product() {
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(double price) {
        this.price = price;
    }

    public int getNoOfItems() {
        return noOfItems;
    }

    public void setNoOfItems(int noOfItems) {
        this.noOfItems = noOfItems;
    }

    public double getAmount() {
        return amount;
    }

    public void setAmount(double amount) {
        this.amount = amount;
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }
}
Note that apart from qualifying this as an Entity, it has to be qualified with Annotations like
@Name and
@Scope to tell to the Seam Framework that it is a Seam Component. The Session Bean will store and retrieve the values from the
Product objects fetched from the Product Table.
7.3.2) Session Bean Interface
Following is the interface definition of the Session Bean. This Bean has various functionalities like Creating, Retrieving and Destroying products. Other that this it also performs the checkout operation which is nothing but the calculation of the no of items with the individual products along with the total amount calculation.
Shopping.java
package net.javabeat.articles.jboss.seam.shopping;

import javax.ejb.*;
import java.util.*;
import net.javabeat.articles.jboss.seam.shopping.Product;

/**
 * Local business interface of the shopping-cart session bean
 * (implemented by ShoppingManager).
 */
@Local
public interface Shopping {

    /** @return the product catalogue loaded from the Product table. */
    public List getProducts();

    /** Seeds the demo products and persists them via JPA. */
    public void createProducts();

    /** Tears down the (stateful) bean at the end of the session. */
    public void destroyProducts();

    /**
     * Computes each product's line amount (noOfItems * price) and the
     * grand total.
     *
     * @return the JSF navigation outcome "checkout"
     */
    public String checkout();

    /** @return the grand total computed by {@link #checkout()}. */
    public double getTotalPrice();
}
7.3.3) Session Bean Implementation
ShoppingManager.java
package net.javabeat.articles.jboss.seam.shopping; import java.util.List; import javax.ejb.*; import net.javabeat.articles.jboss.seam.shopping.Product; import net.javabeat.articles.jboss.seam.shopping.ProductDatabase; import org.jboss.seam.annotations.*; import org.jboss.seam.ejb.*; import static org.jboss.seam.ScopeType.SESSION; import javax.persistence.*; @Stateful @Name("ShoppingManager") @Scope (SESSION) public class ShoppingManager implements Shopping { private List products; private double totalPrice; @PersistenceContext(unitName = "ShoppingManager") private EntityManager entityManager; @Create public void createProducts () { Product product = null; product = createProduct("Sony Playstation", "Gaming Console The Sony Playstation 2", 9000d, 0); entityManager.persist(product); product = createProduct("Traffic Signal - DVD", "SKU-CODE: Traffic Signal - Sony & BMG", 240d, 0); entityManager.persist(product); product = createProduct("DVD Player", "Samsung P475 DVD Player", 3990d, 0); entityManager.persist(product); product = createProduct("COMPAQ Notebook", "COMPAQ Notebook PC - PRESARIO V6307", 49000d, 0); entityManager.persist(product); products = entityManager.createQuery( "select prdt from Product prdt").getResultList(); } private Product createProduct(String name, String description, double price, int noOfItems){ Product product = new Product(); product.setName(name); product.setDescription(description); product.setPrice(price); product.setNoOfItems(noOfItems); return product; } public List getProducts() { return products; } public String checkout(){ for(Product aProduct : products){ aProduct.setAmount(aProduct.getNoOfItems() * aProduct.getPrice()); totalPrice = totalPrice + (aProduct.getNoOfItems() * aProduct.getPrice()); } return "checkout"; } public double getTotalPrice(){ return totalPrice; } @Destroy @Remove() public void destroyProducts(){ } }
As already explained, the above code makes use of EntityManager to create and retrieve products from the products Table.
8) Conclusion
This is an Introduction article about the evolving JBoss Seam Framework. It started with the necessity of having this Framework and then continued with a discussion of the many advantages of using this framework. The advantages like “EJB-JSF” Integration, the need for a Stateful Web Application and the exciting Dependency Bijection Functionality are covered in a detailed manner. Then the various core modules of the Seam Framework like Seam Components, the need and the usage of Seam Annotations over various modules along with the Various Types of Events emitted by Seam Sources are given much consideration and discussed briefly. Then the article finally ended up with a Shopping Application thereby illustrating the various techniques and concepts available in JBoss Seam. | http://www.javabeat.net/introduction-to-jboss-seam/ | CC-MAIN-2015-40 | en | refinedweb
Save your old pst.file from this location
C:\Documents and Settings\user\Local settings\Application data\microsoft\outlook
and paste it to your Desktop AND delete the origional pst file.
Now, Open up your MS outlook 2007..
import the old outlook.pst file….
File/import/import from another program/pst file/change directory to your old outlook.pst file from Desktop(or where ever u have saved it)
if still doesnt work…reinstall ms software07 again and do the above instruction again…
HTH.
REGISTER or login:
Discuss This Question: | http://itknowledgeexchange.techtarget.com/itanswers/outlook-2000-upgraded-to-outlook-2007/ | CC-MAIN-2015-40 | en | refinedweb |
Type Safe PPU and SPU pointers
Offload KB - features
Old Content Alert
Please note that this is a old document archive and the will most likely be out-dated or superseded by various other products and is purely here for historical purposes.
Inside offload scopes there are two types of addresses – local addresses and outer addresses. All addresses of data defined outside the offload scope become outer addresses. Addresses of data defined inside an offload block point to local SPU memory whereas outer addresses point to PPU memory. In order to be able to process outer addresses on the SPU, the __outer pointer keyword has been added to allow pointer declarations for outer addresses.
#include <liboffload> int gx; int * gptr; int main() { __blockingoffload { int lx; int * lptr; // pointer to local data lptr = gptr; // incompatible types: int * = __outer int * gptr = & lx; // incompatible types: __outer int * = int * gptr = & gx; // ok, both are outer lptr = & lx; // ok, both are local __outer int * optr; // pointer to hold outer address optr = & gx; // ok, outer pointer gets outer address optr = gptr; // ok, outer pointer initialised with outer address } }
This __outer keyword is used as a pointer modifier (just as const and volatile are in C/C++) and only required in an offload context, which includes pointer declarations inside offload blocks and in offload functions. In all other contexts it will be ignored.
As with the const and volatile qualifiers in C++, the __outer modifier can appear anywhere in the base type for a pointer declaration. For example __outer int * is equivalent to int __outer * , but not to int * __outer as the latter declares a local pointer type that is __outer qualified.
Inside offload contexts pointers declared with __outer (i.e. point to PPU memory) are incompatible with those declared without __outer (i.e. point to SPU memory) which has implications for conversions, overload resolution and name mangling.
#include <liboffload> T * x; __blockingoffload { // in this scope the PPU pointer x automatically becomes an __outer int * __outer T * z = x; // outer pointer variable on the SPU T y = * z; // dereferencing z causes an SPU software cache read * z = y; // writing SPU value to PPU variable }
The __outer qualifier allows the programmer to distinguish between two types of pointers within an offload scope: pointers to PPU memory and pointers to SPU memory:
#include <liboffload> __blockingoffload { int * p; // pointer to local store (SPU memory) __outer int * k; // pointer to PPU memory int v1 = * p; // dereferencing SPU pointer int v2 = * k; // dereferencing PPU pointer – DMA into SPU p = k; // illegal – SPU and PPU pointers are incompatible }
In the above example, variable p has type int *. Since this declaration appears in an offload scope, and since the __outer qualifier has not been used, the pointer p is restricted to point to memory on the local store of the SPU on which the enclosing offload block runs.
On the other hand, variable k has type __outer int *. The use of the __outer qualifier restricts k to point to shared PPU memory – memory outside the local store of the SPU on which the enclosing offload block runs.
Inside the offload block, trying to assign p to k (or vice versa) results in a compiler error. This strong type checking prevents dereferencing a PPU address in SPU memory (and vice versa). Of course, it is legal to dereference p and assign the result to the memory location pointed to by k (and vice-versa). Codeplay Offload automatically transforms this into appropriate DMA and/or software cache operations.
A pointer declared outside an offload scope always points to data in PPU shared memory. Such pointers are automatically given the __outer qualifier.
It is desirable to reduce the number of places in the source code where the __outer keyword has to be used. The two contexts where __outer may be omitted is in initializations and casts. This language feature is designed to avoid having to explicitly type __outer in front of declarations and in casts, and is especially useful when refactoring existing source code for Codeplay Offload, and during function duplication. Function duplication also allows __outer to be omitted from parameter and return pointer/reference types.
Pointer and reference types declared without the __outer modifier in offload contexts are local pointer/reference types (pointing to SPU data). However, if a pointer variable declared without __outer is initialized with an outer pointer type, the compiler will automatically deduce this property and make the variable an outer pointer.
#include <liboffload> T * x; __blockingoffload { __outer T * z = x; // explicitly declared __outer T * z2 = x; // silently deduces __outer from x // z and z2 have the same type: __outer T * }
The following example lists legal cases where __outer is deduced for references and pointers from different source types.
#include <liboffload> int * a = new int(3); static int b = 4; static int c[10] = {5}; __blockingoffload { int * d = a; // __outer int * int & e = b; // __outer int & int * f = c; // __outer int * from PPU array int * g = & b; // __outer int & h = * a; // __outer int & int & i = * c; // __outer int & }
Inside an offload context, when casting a value of pointer type __outer T * to another destination pointer type which does not declare __outer, then the destination pointer type of that cast automatically receives the __outer property.
#include <liboffload> T1 * global; int main() { __blockingoffload { __outer T2 * var; var = (T2 * )global; // T2 * inside the bracket is automatically changed to __outer T2 * } }
Valid uses of pointers in an offload block are illustrated by the following example.
#include <liboffload> float f_PPU; float * ptr_PPU; int main() { __blockingoffload { float f_SPU; float * ptr_SPU; __outer float * outer_ptr_SPU; ptr_PPU = & f_PPU; // PPU address to PPU variable outer_ptr_SPU = & f_PPU; // PPU address to SPU outer pointer outer_ptr_SPU = ptr_PPU + 1; ptr_PPU = outer_ptr_SPU + 1; * ptr_SPU = * ptr_PPU; // dma into SPU * ptr_SPU = * outer_ptr_SPU; // dma into SPU * ptr_PPU = * outer_ptr_SPU; // outer memory assignment * ptr_PPU = * ptr_SPU; // dma from SPU into PPU * outer_ptr_SPU = * ptr_PPU; // outer memory assignment * outer_ptr_SPU = * ptr_SPU; } }
These are cases that the compiler will not accept.
#include <liboffload> float f_PPU; float * ptr_PPU; int main() { __blockingoffload { float f_SPU; float * ptr_SPU; __outer float * outer_ptr_SPU; ptr_PPU = & f_SPU; // illegal to outer_ptr_SPU = & f_SPU; // assign SPU ptr_PPU = ptr_SPU; // address to PPU outer_ptr_SPU = ptr_SPU; // pointer ptr_SPU = & f_PPU; // illegal to assign ptr_SPU = ptr_PPU; // PPU address to ptr_SPU = outer_ptr_SPU; // SPU pointer } }
Pointer types can be declared using __declspec(__setoffloadlevel__(level)), where level is the offload block level which can have the value one for local pointer and zero for outer pointers. In fact __declspec(__setoffloadlevel__(0)) is equivalent to __outer and __declspec(__setoffloadlevel__(1)) is equivalent to __inner. This declspec allows declaring SPU local pointer types outside an offload block which may be useful inside structure declarations to explicitly declare structure members with local pointer types.
#include <liboffload> const int OUTER_DEPTH = 0; const int INNER_DEPTH = 1; template <class T, int DEPTH = OUTER_DEPTH> struct struct_with_pointer { __declspec(__setoffloadlevel__(DEPTH)) T * ptr; // depending on DEPTH either a local or outer pointer }; struct_with_pointer<int> var_outer_ptr; int outer_int; void f() { __blockingoffload { struct_with_pointer<int> var1_outer_ptr; struct_with_pointer<int, __OFFLOAD_DEPTH__> offload_local; int offload_int; int * localptr; offload_local.ptr = & offload_int; // ok, local address to local pointer localptr = offload_local.ptr; var_outer_ptr.ptr = & outer_int; // ok, outer address to outer pointer var_outer_ptr = var1_outer_ptr; // ok, same struct types } } | https://www.codeplay.com/products/offload/kb/type-safe-ppu-and-spu-pointers.html | CC-MAIN-2021-04 | en | refinedweb |
If you have a coding question, you can use stackoverflow with the tag 'errbot' and 'python'
Hi All
I am trying to schedule a cron job to post message on slack using my Err Bot . Has anyone used a scheduler/cron job for the same?
def activate(self): super().activate() self.start_poller(60, self.oncall(msg,args), times=1)
Above is my code snippet I am using to schedule my function oncall after every 60seconds.
@botcmd def oncall(self, msg, args):
Above is the function I am trying to call from my scheduler/cronjob
@achoudh5 yes I use the poller often. The mistake you've made is that you calling the on_call method in the start poller rather than just passing the method. For example it should be
def activate(self): super().activate() self.start_poller(60, self.oncall, 1, msg, args) def oncall(self, msg, args): print("in on call")
I also noticed that your activate method doesn't contain the variables
msg or
args so are you making an assumption that they are present? If they are not going to be present then your code should be:
def activate(self): super().activate() self.start_poller(60, self.oncall, 1) def oncall(self): print("in on call")
@nzlosh my config.py looks like this for ErrBot:-
import logging BACKEND = 'Slack' BOT_IDENTITY ={'token':'xoxb-<slack OAuth Bot token>'} BOT_DATA_DIR = r'<path>/ErrBot/errbot/data' BOT_EXTRA_PLUGIN_DIR = r'<path>/ErrBot/errbot/plugins' BOT_LOG_FILE = r'<path>/ErrBot/errbot/errbot.log' BOT_LOG_LEVEL = logging.DEBUG BOT_ADMINS = ("<slack_admin_name>")
Is there any more setting I need to do to make it work?
Slackbackend you must have a legacy token. To create one, follow these instructions.
pip install errbot[slack]which works, but when I start up I get log messages: ```
slack_backend
pip install errbot[slack-rtm]to install it.
To use the
Slackbackend you must have a legacy token. To create one, follow these instructions.
@nzlosh there seems to be upgradation, there is not bot scope I have seen and rest I followed similar process before as well :/
slack_rtmbackend but my understanding is you need to use slackclient v2 with it.
BACKEND = 'Slack'
I have a command with an argument. I'd like it to work with a default value if the argument is omitted.
@arg_botcmd("service_name", type=str, default="default_service", help="Service name") def xstatus(self, msg, service_name): ...
What's the right way to do it? I get an error when I try to run it w/o an argument:
User: !xstatus myservice Errbot: ok User: !xstatus Errbot: I couldn't parse the arguments; the following arguments are required: service_name
errbot --storage-get <plugin>gives me the right config dict back but it's not actually applied -- errbot complains that the plugin isn't configured when it boots. I don't think errbotio/errbot#910 should have been closed.
2020-12-04 20:12:40,809 DEBUG errbot.botplugin Previous timer found and removed
latesttag in docker hub:
docker pull errbotio/errbot:6.1.7
@re_botcmd(pattern=r"(?=.*re?)(?=.*cmd)(?=.*param=(?P<param>\w+))", flags=re.IGNORECASE) def my_re_cmd(self, msg, match): my_parm = match.groupdict().get("param", None) yield f"My re cmd with a param={my_param}" ... @cmdfilter def my_filter(self, msg, cmd, args, dry_run): if cmd == self.my_re_cmd.__name__: return msg, "my_func", None return None, None, None @re_botcmd(pattern=r"ah ah") def my_func(self, msg, match): print(match.groupdict()) # {'param' = 123} BUG??? yield "My func"
Syntax error in the given configurationwhen trying to configure my plugins through Slack. It seems to happen intermittently. Anyone know what might be the cause? I've documented a similar issue here before errbotio/errbot#1298 | https://gitter.im/errbotio/errbot | CC-MAIN-2021-04 | en | refinedweb |
SlidingRuler
SlidingRuler is a Swift package containing a SwiftUI control that acts like a linear infinite slider or a finite, more precise one. The notable difference is that the user can evaluate the value more precisely on a sliding ruler than on a slider. By default it shows a ruler you can slide around and a beautiful red cursor pointing toward the current value :
These features are the supported features :
- [x] Dynamic type
- [x] Haptic feedback (on compatible devices)
- [x] Light & dark color schemes
- [x] Scroll inertia & rubber banding
- [x] Custom styling
- [x] Animations
- [x] Pointer interactions
- [ ] Layout direction
- [ ] Accessibility
It's been made to feel native and to integrate nicely in iOS and iPadOS.
Installation
dependencies: [ // Dependencies declare other packages that this package depends on. .package(url: "", .upToNextMajor(from: "0.1.0")), ],
Usage
Before using anything be sure to
import SlidingRulerin the target swift file.
Like any SwiftUI control you can create a
SlidingRuler with an unique parameter: the value.
Like any SwiftUI input control the value is a
Binding<...> :
@State private var value: Double = 0 var body: some View { ... SlidingRuler(value: $value) ... }
Note that
value must conform to
BinaryFloatingPoint.
✅ When to use ?
It's good to use a sliding ruler in these cases:
- To input a numeric value that belongs to an unlimited range or a particularly large one.
- To input measurable values like masses or lenghts.
- To pick a precise value in a tiny range —for this use a small
stepvalue.
- You're already using multiple sliding rulers in your form and using a slider for this value will break the continuity. —Ok, but read the next section first.
- You just feel like to and you're confident it'll be ok. —Ok, but read the next section first.
Additionaly a disabled slinding ruler can be used as a meter.
⛔️ When not to use
It's bad to use a sliding ruler in these cases:
- To make the user chose between a small to medium set of discrete values. → Use a
Pickeror a
Stepper.
- To pick an unprecise value in a small closed range. → Use a
Slider.
- To change a device audio volume. → Use a
Slider.
- To let the user input an arbitrary value like its age. → Use a
TextField. Perhaps UI/UX design is not for you after all...
- To input a date component. → Use a
DatePicker. Are you out of your mind ?
Using finite or semi-finite ranges
In some cases you may want to use such ranges when it makes sense —particularly when inputing strictly positive or negative values.
A slinding ruler will show these boundaries clearly to the user :
The user is not allowed to drag the ruler beyond these boundaries. Trying to do so will result in haptic feedback (on compatible devices) and the over-drag will feel like a rubber band, like a scroll view.
Methods added to
View
SlidingRuler don't have no direct method but like many SwiftUI controls it adds some methods to
View. They work in the same fashion as other
View methods that impact a component and all its descendent in a view tree.
slidingRulerStyle
func slidingRulerStyle<S>(_ style: S) -> some View where S: SlidingRulerStyle
Sets the style for all sliding rulers within the view tree. See the Custom Styling Guide (once it's been written).
slidingRulerCellOverflow
func slidingRulerCellOverflow(_ overflow: Int) -> some View
Sets the cell overflow for all sliding rulers within the view tree. See the Custom Styling Guide (once it's been written).
You may get retired without even using this method, ever.
Parameter list
The complete
init method signature is :
init(value: Binding<V>, in bounds: ClosedRange<V> = -V.infinity...V.infinity, step: V.Stride = 1, snap: Mark = .none, tick: Mark = .none, onEditingChanged: @escaping (Bool) -> () = { _ in }, formatter: NumberFormatter? = nil)
bounds :
ClosedRange<V>
The closed range of possible values.
By default it is
-V.infinity...V.infinity. Meaning that the sliding ruler is virtualy infinite.
step :
V.Stride
The stride of the SlidingRuler.
By default it is
1.0.
snap :
Mark
Possible values :
.none,
.unit,
.half,
.fraction.
Describes the ruler's marks stickyness: when the ruler stops and the cursor is near a graduation it will snap to it.
.none: no snap.
.unit: snaps on every whole unit graduations.
.half: snaps on every whole unit and half unit graduations.
.fraction: snaps on every graduations.
By default it is
.none.
Note: to trigger a snap the cursor must be near the graduation. Here near means that the delta between the cursor and the graduation is strictly less than a fraction of the ruler unit.
The value of a fraction is driven by the style's
fractions property. The default styles have a
fractions property equal to
10 so a fraction equals to
1/10 of a unit or
0.1 with the default
step (
1.0).
tick :
Mark
Possible values :
.none,
.unit,
.half,
.fraction.
Defines what kind of graduation produces an haptic feedback when reached.
.none: no haptic feedback.
.unit: haptic feedback on every whole unit graduations.
.half: haptic feedback on every whole unit and half unit graduations. (If the style's fraction count allows an half)
.fraction: haptic feedback on every graduations.
By default it is
.none.
onEditingChanged :
(Bool) -> Void
A closure executed when a drag session happens. It receives a boolean value set to
true when the drag session starts and
false when the value stops changing.
By default it is an empty closure that produces no action.
formatter :
NumberFormatter
A
NumberFormatter instance the ruler uses to format the ruler's marks.
By default it is
nil.
Sliding ruler styles
For a comprehensive custom styling documentation See the Custom Styling Guide (once it's been written).
Custom styling is still a work in progress. As it is tied to accessibility some work on this topic is still required to determine how a style should adapt to it.
By default
SlindingRuler ships with four styles. Two of them don't show any mark on the ruler
PrimarySlidingRulerStyle
This is the default style.
CenteredSlindingRulerStyle
BlankSlidingRulerStyle
BlankCenteredSlidingRulerStyle
Example
Percentage value
A SlindingRuler that goes from 0 to 100%, that snaps and gives haptic feedback on any graduation.
struct PercentSlidingRuler: View { @State private var value: Double = .zero private var formatter: NumberFormatter { let f = NumberFormatter() f.numberStyle = .percent f.maximumFractionDigits = 0 return f } var body: some View { SlidingRuler(value: $value, in: 0...1, step: 0.1, snap: .fraction, tick: .fraction, formatter: formatter) } } | https://iosexample.com/a-sliding-ruler-control-for-swiftui/ | CC-MAIN-2021-04 | en | refinedweb |
Heads up! To view this whole video, sign in with your Courses account or enroll in your free 7-day trial. Sign In Enroll
Preview
Provide and Consume State5:37 with Guil Hernandez
Now that context is set and the provider is in place, we'll provide state to the entire app. We'll set up Consumers that subscribe (or connect) to the
Provider component to make use of the context.
In the previous video, 0:00
we wrapped the children of app, in the Provider component. 0:01
The Provider usually lives at the top level of your app. 0:04
And it's what's going to provide the actual data 0:07
that needs to be shared throughout the component tree. 0:10
The provider component requires a value prop to share data. 0:13
The value can be anything, but it's usually the application state and 0:17
any actions or event handlers shared between components. 0:21
Let's first pass the value prop our player state, with this.state.players. 0:24
In React Dev tools, notice how Context.Provider now holds 0:29
the players array and state and its value prop. 0:34
So any component that's a descendant of the provider will have access to 0:39
the data given to the value prop. 0:43
And the way you access that data is with a consumer. 0:45
The Provider provides the context and a consumer consumes and 0:48
subscribes to that context. 0:53
A single provider can be connected to many consumers, 0:55
no matter how far down they are on the component tree. 0:58
So let's start by adding context to the Stats component. 1:01
In Stats.js, we'll import the consumer using a named 1:05
import with import consumer from ./context. 1:10
Then in the function's return statement, 1:17
we'll use the Consumer component by adding opening and closing Consumer tags. 1:19
To render anything inside the consumer, 1:26
you use a pattern in React called a Render Prop. 1:28
Render Prop refers to a method for 1:31
sharing code between React components using a prop whose value is a function. 1:33
A component is provided a prop which takes a function that returns a React element. 1:38
You can learn a whole lot more about Render Props in the resources listed in 1:43
the teacher notes. 1:46
This pattern is also called function as a child. 1:47
Because instead of passing a prop, you're also able to write a function inside 1:50
the opening and closing Consumer tags. 1:54
The function returns the part of the UI you want to render. 1:56
So we'll use a function that returns the stats UI inside the Consumer. 1:59
This function is required and needs to be placed inside a JSX expression. 2:04
So let's add a set of curly braces inside the Consumer tags, 2:09
then the function that will render something based on the context. 2:12
The function takes the current context value as a parameter, and returns JSX. 2:16
This parameter is commonly named value or context, I'll name it context. 2:22
The context parameter passed to the function will be equal to the value prop 2:28
of the provider. 2:32
In other words, the data we pass into the provider's value prop 2:34
is made available here via the context parameter. 2:38
So the consumer is now subscribed to any context changes. 2:42
Now we're able to access the player's data directly from within the stats component. 2:46
In the body of the function, I'll add the return keyword in a set of parentheses 2:51
to wrap the JSX, then move the entire stats table inside the return statement. 2:56
Next, we need to move the total players and total points variables inside 3:05
the consumer, just above the child function's return statement. 3:10
That way, we can access context. 3:14
Then, replace props.players.length with context.length and 3:20
props.players.reduce with context.reduce. 3:26
And since we're no longer passing the player state to the Stats 3:31
component as props, we can delete the props parameter. 3:36
As well as the entire propTypes object below the function and 3:40
the PropTypes import statement up top. 3:44
In Header.js, we can delete the players prop passed to the Stats component, 3:48
because it no longer needs it. 3:54
And remove the object and players variable in the Header function's parameters, 3:57
as well as the propTypes object below the function, and the PropTypes import. 4:02
Over in app.js, the player state no longer needs to make its way through the Header. 4:09
So we can also remove the player's prop past to the Header component. 4:15
Our Header component now looks much cleaner and has less responsibilities. 4:20
It's just a stateless component that renders the Stats title and Stopwatch. 4:25
The Stats component displays all the scoreboard stats just like before. 4:30
But instead of props, 4:35
it's getting the player's data from the provider via the consumer. 4:36
So any time there's a change in the player's state, 4:41
the consumer get the data it needs to update the UI from the provider. 4:45
All right, now that you've learned how to use a consumer and access context 4:50
set by the provider, why don't you use a consumer inside the player list component. 4:54
The consumer here needs to access the player state just like we did with 5:00
the stats component. 5:04
And it should provide that data to the map function returning each player and state. 5:05
I'll show you my solution in the next video. 5:10
You may have noticed something you haven't seen yet in React. 5:13
The React.Fragment tags inside the return statement. 5:16
A React.Fragment lets you group a list of sibling elements or 5:19
components without having to add an unnecessary reaper element. 5:23
It doesn't render anything out to the DOM so 5:27
I'm just using it here to contain the list of player components. 5:29
You can read more about React.Fragment in the teachers notes. 5:33 | https://teamtreehouse.com/library/provide-and-consume-state?t=255 | CC-MAIN-2021-04 | en | refinedweb |
Set Operations
import findspark findspark.init() import pyspark sc = pyspark.SparkContext()
Set Operations
Spark also provides functionality similar to the native Python
set operations.
Union
everyThree = [chr(x+65) for x in range(26)][::3] everyThree = sc.parallelize(everyThree) everyThree.collect()
['A', 'D', 'G', 'J', 'M', 'P', 'S', 'V', 'Y']
everyFour = [chr(x+65) for x in range(26)][::4] everyFour = sc.parallelize(everyFour) everyFour.collect()
['A', 'E', 'I', 'M', 'Q', 'U', 'Y']
Note that the
unioning removes duplicate entries
print(everyThree.union(everyFour).collect())
['A', 'D', 'G', 'J', 'M', 'P', 'S', 'V', 'Y', 'A', 'E', 'I', 'M', 'Q', 'U', 'Y']
Intersection and Subtract
vowelsAlways = ['A', 'E', 'I', 'O', 'U'] vowelsSometimes = vowelsAlways[:] + ['Y']
vowelsAlways = sc.parallelize(vowelsAlways) vowelsSometimes = sc.parallelize(vowelsSometimes) print(vowelsAlways.collect()) print(vowelsSometimes.collect())
['A', 'E', 'I', 'O', 'U'] ['A', 'E', 'I', 'O', 'U', 'Y']
vowelsSometimes.intersection(vowelsAlways).collect()
['O', 'I', 'A', 'U', 'E']
vowelsSometimes.subtract(vowelsAlways).collect()
['Y']
Set Operations Return more RDDs
You can get pretty complicated with your set operations. It’s all just RDDs.
wordSoup = everyFour.union(everyThree) sansVowels = wordSoup.subtract(vowelsAlways) print(sansVowels.collect())
['J', 'Q', 'S', 'D', 'M', 'M', 'Y', 'Y', 'G', 'P', 'V']
Distinct
text = 'the quick brown fox jumped over the lazy dog' text = sc.parallelize(text)
text.distinct().count()
26
Performance Note
Neat as this is, recall that Spark is only designed for the scope of each partition.
That’s to say that if you have your data split up over N partitions, the set operation per partition is probably pretty cheap, but then it has to exhaustively check in with each other partition to ensure that it’s performing the operation correctly.
This scales… poorly, haha | https://napsterinblue.github.io/notes/spark/basics/set_operations/ | CC-MAIN-2021-04 | en | refinedweb |
Detrending Data in Python with Numpy
⚠️ SEE UPDATED POST: Signal Filtering in Python
While continuing my quest into the world of linear data analysis and signal processing, I came to a point where I wanted to emphasize variations in FFT traces. While I am keeping my original data for scientific reference, visually I want to represent it emphasizing variations rather than concentrating on trends. I wrote a detrending function which I'm sure will be useful for many applications:
def detrend(data, degree=10):
    """Remove a running-mean trend from a sequence.

    Each interior point has the mean of its surrounding window
    (``degree`` points on either side, ``2 * degree`` samples total)
    subtracted from it.  The first and last ``degree`` positions,
    where no full window exists, are padded with ``None``.

    Args:
        data: sequence of numbers to detrend.
        degree: half-width of the moving-average window.

    Returns:
        A list of detrended values padded with ``None`` at both ends
        (same length as ``data`` whenever ``len(data) >= 2 * degree``).
    """
    pad = [None] * degree
    detrended = list(pad)
    for center in range(degree, len(data) - degree):
        window = data[center - degree:center + degree]
        baseline = sum(window) / len(window)
        detrended.append(data[center] - baseline)
    return detrended + pad
However, this method is extremely slow. I need to think of a way to accomplish this same thing much faster. [ponders]
UPDATE: It looks like I've once again re-invented the wheel. All of this has been done already, and FAR more efficiently I might add. For more see scipy.signal.detrend.html
import scipy.signal ffty=scipy.signal.detrend(ffty)
Have something to say about this article? Let me know! | https://swharden.com/blog/page/29 | CC-MAIN-2021-04 | en | refinedweb |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.