title
stringlengths 3
221
| text
stringlengths 17
477k
| parsed
listlengths 0
3.17k
|
|---|---|---|
HTML | DOM Audio Object
|
02 Aug, 2019
The Audio object is used for representing an HTML <audio> element.The Audio Object is a new object in HTML5.
Syntax:
For creating an <audio> element:var gfg = document.createElement("AUDIO")
var gfg = document.createElement("AUDIO")
For accessing an <audio> element:var x = document.getElementById("myAudio")
var x = document.getElementById("myAudio")
Property Values:
Audio Object Methods:
Below programs illustrate the Audio Object :Example-1: Creating a <audio> element.
<!DOCTYPE html><html> <head> <title>Audio Object</title> <style> h1 { color: green; } h2 { font-family: Impact; } body { text-align: center; } </style></head> <body> <h1>GeeksforGeeks</h1> <h2>Audio Object</h2> <p>Double Click the "Create" button to create an Audio Object.</p> <button ondblclick="Create()"> Create </button> <script> function Create() { // Create audio element. var m = document.createElement( "AUDIO"); if (m.canPlayType("audio/mpeg")) { m.setAttribute("src", "bells.mp3"); } else { m.setAttribute("src", "bells.ogg"); } m.setAttribute("controls", "controls"); document.body.appendChild(m); } </script> </body> </html>
Output:
Before clicking the button:
After clicking the button:
Example-2: Accessing a <audio> element.
<!DOCTYPE html><html> <head> <title>Audio Object</title> <style> h1 { color: green; } h2 { font-family: Impact; } body { text-align: center; } </style></head> <body> <h1>GeeksforGeeks</h1> <h2>Audio Object</h2> <audio id="track" controls> <source src="bells.ogg" type="audio/ogg"> <source src="bells.mp3" type="audio/mpeg"> Your browser does not support the audio element. </audio> <p>Double-click the "Access Audio" button to get the duration of the audio, in seconds.</p> <button onclick="access()">Access Audio</button> <p id="test"></p> <script> function access() { // Accessing audio element duration. var m = document.getElementById( "track").duration; document.getElementById("test").innerHTML = m; } </script> </body> </html>
Output:
Before clicking the button:
After clicking the button:
Supported Browsers: The browser supported by HTML | DOM Audio Object are listed below:
Google Chrome
Internet Explorer
Firefox
Opera
Apple Safari
HTML-DOM
Picked
HTML
Web Technologies
HTML
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 54,
"s": 26,
"text": "\n02 Aug, 2019"
},
{
"code": null,
"e": 163,
"s": 54,
"text": "The Audio object is used for representing an HTML <audio> element.The Audio Object is a new object in HTML5."
},
{
"code": null,
"e": 171,
"s": 163,
"text": "Syntax:"
},
{
"code": null,
"e": 245,
"s": 171,
"text": "For creating an <audio> element:var gfg = document.createElement(\"AUDIO\")"
},
{
"code": null,
"e": 287,
"s": 245,
"text": "var gfg = document.createElement(\"AUDIO\")"
},
{
"code": null,
"e": 363,
"s": 287,
"text": "For accessing an <audio> element:var x = document.getElementById(\"myAudio\")"
},
{
"code": null,
"e": 406,
"s": 363,
"text": "var x = document.getElementById(\"myAudio\")"
},
{
"code": null,
"e": 423,
"s": 406,
"text": "Property Values:"
},
{
"code": null,
"e": 445,
"s": 423,
"text": "Audio Object Methods:"
},
{
"code": null,
"e": 528,
"s": 445,
"text": "Below programs illustrate the Audio Object :Example-1: Creating a <audio> element."
},
{
"code": "<!DOCTYPE html><html> <head> <title>Audio Object</title> <style> h1 { color: green; } h2 { font-family: Impact; } body { text-align: center; } </style></head> <body> <h1>GeeksforGeeks</h1> <h2>Audio Object</h2> <p>Double Click the \"Create\" button to create an Audio Object.</p> <button ondblclick=\"Create()\"> Create </button> <script> function Create() { // Create audio element. var m = document.createElement( \"AUDIO\"); if (m.canPlayType(\"audio/mpeg\")) { m.setAttribute(\"src\", \"bells.mp3\"); } else { m.setAttribute(\"src\", \"bells.ogg\"); } m.setAttribute(\"controls\", \"controls\"); document.body.appendChild(m); } </script> </body> </html>",
"e": 1462,
"s": 528,
"text": null
},
{
"code": null,
"e": 1470,
"s": 1462,
"text": "Output:"
},
{
"code": null,
"e": 1498,
"s": 1470,
"text": "Before clicking the button:"
},
{
"code": null,
"e": 1525,
"s": 1498,
"text": "After clicking the button:"
},
{
"code": null,
"e": 1565,
"s": 1525,
"text": "Example-2: Accessing a <audio> element."
},
{
"code": "<!DOCTYPE html><html> <head> <title>Audio Object</title> <style> h1 { color: green; } h2 { font-family: Impact; } body { text-align: center; } </style></head> <body> <h1>GeeksforGeeks</h1> <h2>Audio Object</h2> <audio id=\"track\" controls> <source src=\"bells.ogg\" type=\"audio/ogg\"> <source src=\"bells.mp3\" type=\"audio/mpeg\"> Your browser does not support the audio element. </audio> <p>Double-click the \"Access Audio\" button to get the duration of the audio, in seconds.</p> <button onclick=\"access()\">Access Audio</button> <p id=\"test\"></p> <script> function access() { // Accessing audio element duration. var m = document.getElementById( \"track\").duration; document.getElementById(\"test\").innerHTML = m; } </script> </body> </html>",
"e": 2544,
"s": 1565,
"text": null
},
{
"code": null,
"e": 2552,
"s": 2544,
"text": "Output:"
},
{
"code": null,
"e": 2580,
"s": 2552,
"text": "Before clicking the button:"
},
{
"code": null,
"e": 2607,
"s": 2580,
"text": "After clicking the button:"
},
{
"code": null,
"e": 2694,
"s": 2607,
"text": "Supported Browsers: The browser supported by HTML | DOM Audio Object are listed below:"
},
{
"code": null,
"e": 2708,
"s": 2694,
"text": "Google Chrome"
},
{
"code": null,
"e": 2726,
"s": 2708,
"text": "Internet Explorer"
},
{
"code": null,
"e": 2734,
"s": 2726,
"text": "Firefox"
},
{
"code": null,
"e": 2740,
"s": 2734,
"text": "Opera"
},
{
"code": null,
"e": 2753,
"s": 2740,
"text": "Apple Safari"
},
{
"code": null,
"e": 2762,
"s": 2753,
"text": "HTML-DOM"
},
{
"code": null,
"e": 2769,
"s": 2762,
"text": "Picked"
},
{
"code": null,
"e": 2774,
"s": 2769,
"text": "HTML"
},
{
"code": null,
"e": 2791,
"s": 2774,
"text": "Web Technologies"
},
{
"code": null,
"e": 2796,
"s": 2791,
"text": "HTML"
}
] |
Program to find whether a given number is power of 2
|
11 Jul, 2022
Given a positive integer, write a function to find if it is a power of two or not.Examples :
Input : n = 4
Output : Yes
22 = 4
Input : n = 7
Output : No
Input : n = 32
Output : Yes
25 = 32
1. A simple method for this is to simply take the log of the number on base 2 and if you get an integer then the number is the power of 2
C++
C
Java
Python3
C#
PHP
Javascript
// C++ Program to find whether a// no is power of two#include<bits/stdc++.h>using namespace std; // Function to check if x is power of 2bool isPowerOfTwo(int n){ if(n==0) return false; return (ceil(log2(n)) == floor(log2(n)));} // Driver programint main(){ isPowerOfTwo(31)? cout<<"Yes"<<endl: cout<<"No"<<endl; isPowerOfTwo(64)? cout<<"Yes"<<endl: cout<<"No"<<endl; return 0;} // This code is contributed by Surendra_Gangwar
// C Program to find whether a// no is power of two#include<stdio.h>#include<stdbool.h>#include<math.h> /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ if(n==0) return false; return (ceil(log2(n)) == floor(log2(n)));} // Driver programint main(){ isPowerOfTwo(31)? printf("Yes\n"): printf("No\n"); isPowerOfTwo(64)? printf("Yes\n"): printf("No\n"); return 0;} // This code is contributed by bibhudhendra
// Java Program to find whether a// no is power of twoclass GFG{/* Function to check if x is power of 2*/static boolean isPowerOfTwo(int n){ if(n==0) return false; return (int)(Math.ceil((Math.log(n) / Math.log(2)))) == (int)(Math.floor(((Math.log(n) / Math.log(2)))));} // Driver Codepublic static void main(String[] args){ if(isPowerOfTwo(31)) System.out.println("Yes"); else System.out.println("No"); if(isPowerOfTwo(64)) System.out.println("Yes"); else System.out.println("No");}} // This code is contributed by mits
# Python3 Program to find# whether a no is# power of twoimport math # Function to check# Log base 2def Log2(x): if x == 0: return false; return (math.log10(x) / math.log10(2)); # Function to check# if x is power of 2def isPowerOfTwo(n): return (math.ceil(Log2(n)) == math.floor(Log2(n))); # Driver Codeif(isPowerOfTwo(31)): print("Yes");else: print("No"); if(isPowerOfTwo(64)): print("Yes");else: print("No"); # This code is contributed# by mits
// C# Program to find whether// a no is power of twousing System; class GFG{ /* Function to check if x is power of 2*/static bool isPowerOfTwo(int n){ if(n==0) return false; return (int)(Math.Ceiling((Math.Log(n) / Math.Log(2)))) == (int)(Math.Floor(((Math.Log(n) / Math.Log(2)))));} // Driver Codepublic static void Main(){ if(isPowerOfTwo(31)) Console.WriteLine("Yes"); else Console.WriteLine("No"); if(isPowerOfTwo(64)) Console.WriteLine("Yes"); else Console.WriteLine("No");}} // This code is contributed// by Akanksha Rai(Abby_akku)
<?php// PHP Program to find// whether a no is// power of two // Function to check// Log base 2function Log2($x){ return (log10($x) / log10(2));} // Function to check// if x is power of 2function isPowerOfTwo($n){ return (ceil(Log2($n)) == floor(Log2($n)));} // Driver Codeif(isPowerOfTwo(31))echo "Yes\n";elseecho "No\n"; if(isPowerOfTwo(64))echo "Yes\n";elseecho "No\n"; // This code is contributed// by Sam007?>
<script>// javascript Program to find whether a// no is power of two /* Function to check if x is power of 2 */ function isPowerOfTwo(n) { if (n == 0) return false; return parseInt( (Math.ceil((Math.log(n) / Math.log(2))))) == parseInt( (Math.floor(((Math.log(n) / Math.log(2)))))); } // Driver Code if (isPowerOfTwo(31)) document.write("Yes<br/>"); else document.write("No<br/>"); if (isPowerOfTwo(64)) document.write("Yes<br/>"); else document.write("No<br/>"); // This code is contributed by shikhasingrajput.</script>
Output:
No
Yes
Time Complexity: O(1)Auxiliary Space: O(1)
2. Another solution is to keep dividing the number by two, i.e, do n = n/2 iteratively. In any iteration, if n%2 becomes non-zero and n is not 1 then n is not a power of 2. If n becomes 1 then it is a power of 2.
C++
C
Java
Python3
C#
PHP
Javascript
#include <bits/stdc++.h>using namespace std; /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ if (n == 0) return 0; while (n != 1) { if (n%2 != 0) return 0; n = n/2; } return 1;} /*Driver code*/int main(){ isPowerOfTwo(31)? cout<<"Yes\n": cout<<"No\n"; isPowerOfTwo(64)? cout<<"Yes\n": cout<<"No\n"; return 0;} // This code is contributed by rathbhupendra
#include<stdio.h>#include<stdbool.h> /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ if (n == 0) return 0; while (n != 1) { if (n%2 != 0) return 0; n = n/2; } return 1;} /*Driver program to test above function*/int main(){ isPowerOfTwo(31)? printf("Yes\n"): printf("No\n"); isPowerOfTwo(64)? printf("Yes\n"): printf("No\n"); return 0;}
// Java program to find whether// a no is power of twoimport java.io.*; class GFG { // Function to check if // x is power of 2 static boolean isPowerOfTwo(int n) { if (n == 0) return false; while (n != 1) { if (n % 2 != 0) return false; n = n / 2; } return true; } // Driver program public static void main(String args[]) { if (isPowerOfTwo(31)) System.out.println("Yes"); else System.out.println("No"); if (isPowerOfTwo(64)) System.out.println("Yes"); else System.out.println("No"); }} // This code is contributed by Nikita tiwari.
# Python program to check if given# number is power of 2 or not # Function to check if x is power of 2def isPowerOfTwo(n): if (n == 0): return False while (n != 1): if (n % 2 != 0): return False n = n // 2 return True # Driver codeif(isPowerOfTwo(31)): print('Yes')else: print('No')if(isPowerOfTwo(64)): print('Yes')else: print('No') # This code is contributed by Danish Raza
// C# program to find whether// a no is power of twousing System; class GFG{ // Function to check if // x is power of 2 static bool isPowerOfTwo(int n) { if (n == 0) return false; while (n != 1) { if (n % 2 != 0) return false; n = n / 2; } return true; } // Driver program public static void Main() { Console.WriteLine(isPowerOfTwo(31) ? "Yes" : "No"); Console.WriteLine(isPowerOfTwo(64) ? "Yes" : "No"); }} // This code is contributed by Sam007
<?php // Function to check if// x is power of 2function isPowerOfTwo($n){if ($n == 0) return 0;while ($n != 1){ if ($n % 2 != 0) return 0; $n = $n / 2;}return 1;} // Driver Codeif(isPowerOfTwo(31)) echo "Yes\n";else echo "No\n"; if(isPowerOfTwo(64)) echo "Yes\n";else echo "No\n"; // This code is contributed// by Sam007?>
<script> /* Function to check if x is power of 2*/ function isPowerOfTwo(n) { if (n == 0) return 0; while (n != 1) { if (n%2 != 0) return 0; n = n/2; } return 1; } isPowerOfTwo(31)? document.write("Yes" + "</br>"): document.write("No" + "</br>"); isPowerOfTwo(64)? document.write("Yes"): document.write("No"); </script>
Output :
No
Yes
Time Complexity: O(log2n)
Auxiliary Space: O(1)
3. Another way is to use this simple recursive solution. It uses the same logic as the above iterative solution but uses recursion instead of iteration.
C++
Java
Python3
C#
Javascript
// C++ program for above approach#include <bits/stdc++.h>using namespace std; // Function which checks whether a// number is a power of 2bool powerOf2(int n){ // base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // all other odd numbers are not powers of 2 else if (n % 2 != 0 || n ==0) return false; // recursive function call return powerOf2(n / 2);} // Driver Codeint main(){ int n = 64;//True int m = 12;//False if (powerOf2(n) == 1) cout << "True" << endl; else cout << "False" << endl; if (powerOf2(m) == 1) cout << "True" << endl; else cout << "False" << endl;} //code contributed by Moukthik a.k.a rowdyninja
// Java program for// the above approachimport java.util.*;class GFG{ // Function which checks// whether a number is a// power of 2static boolean powerOf2(int n){ // base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // all other odd numbers are // not powers of 2 else if (n % 2 != 0 || n ==0) return false; // recursive function call return powerOf2(n / 2);} // Driver Codepublic static void main(String[] args){ //True int n = 64; //False int m = 12; if (powerOf2(n) == true) System.out.print("True" + "\n"); else System.out.print("False" + "\n"); if (powerOf2(m) == true) System.out.print("True" + "\n"); else System.out.print("False" + "\n");}} // This code is contributed by Princi Singh
# Python program for above approach # function which checks whether a# number is a power of 2def powerof2(n): # base cases # '1' is the only odd number # which is a power of 2(2^0) if n == 1: return True # all other odd numbers are not powers of 2 elif n%2 != 0 or n == 0: return False #recursive function call return powerof2(n/2) # Driver Codeif __name__ == "__main__": print(powerof2(64)) #True print(powerof2(12)) #False #code contributed by Moukthik a.k.a rowdyninja
// C# program for above approachusing System; class GFG{ // Function which checks whether a// number is a power of 2static bool powerOf2(int n){ // Base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // All other odd numbers // are not powers of 2 else if (n % 2 != 0 || n == 0) return false; // Recursive function call return powerOf2(n / 2);} // Driver codestatic void Main(){ int n = 64;//True int m = 12;//False if (powerOf2(n)) { Console.Write("True" + "\n"); } else { Console.Write("False" + "\n"); } if (powerOf2(m)) { Console.Write("True"); } else { Console.Write("False"); }}} // This code is contributed by rutvik_56
<script> // javascript program for// the above approach // Function which checks// whether a number is a// power of 2 function powerOf2(n){ // base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // all other odd numbers are // not powers of 2 else if (n % 2 != 0 || n ==0) return false; // recursive function call return powerOf2(n / 2);} // Driver Code//Truevar n = 64; //Falsevar m = 12; if (powerOf2(n) == true) document.write("True" + "\n");else document.write("False" + "\n"); if (powerOf2(m) == true) document.write("True" + "\n");else document.write("False" + "\n"); // This code contributed by shikhasingrajput </script>
True
False
Time Complexity: O(log2n)
Auxiliary Space: O(log2n)
4. All power of two numbers has only a one-bit set. So count the no. of set bits and if you get 1 then the number is a power of 2. Please see Count set bits in an integer for counting set bits.
C++
Java
C#
Python3
Javascript
#include <bits/stdc++.h>using namespace std;#define bool int /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ /* First x in the below expression is for the case when * x is 0 */ int cnt = 0; while (n > 0) { if ((n & 1) == 1) { cnt++; } n = n >> 1;// keep dividing n by 2 using right // shift operator } if (cnt == 1) {// if cnt = 1 only then it is power of 2 return true; } return false;} /*Driver code*/int main(){ isPowerOfTwo(31) ? cout << "Yes\n" : cout << "No\n"; isPowerOfTwo(64) ? cout << "Yes\n" : cout << "No\n"; return 0;} // This code is contributed by devendra salunke
// Java program of the above approachimport java.io.*; class GFG { // Function to check if x is power of 2 static boolean isPowerofTwo(int n) { int cnt = 0; while (n > 0) { if ((n & 1) == 1) { cnt++; // if n&1 == 1 keep incrementing cnt // variable } n = n >> 1; // keep dividing n by 2 using right // shift operator } if (cnt == 1) { // if cnt = 1 only then it is power of 2 return true; } return false; } public static void main(String[] args) { if (isPowerofTwo(30) == true) System.out.println("Yes"); else System.out.println("No"); if (isPowerofTwo(128) == true) System.out.println("Yes"); else System.out.println("No"); }} // This code is contributed by devendra salunke.
// C# program to check for power for 2using System; class GFG { // Method to check if x is power of 2 static bool isPowerOfTwo(int n) { int cnt = 0; // initialize count to 0 while (n > 0) { // run loop till n > 0 if ((n & 1) == 1) { // if n&1 == 1 keep incrementing cnt // variable cnt++; } n = n >> 1; // keep dividing n by 2 using right // shift operator } if (cnt == 1) // if cnt = 1 only then it is power of 2 return true; return false; } // Driver method public static void Main() { Console.WriteLine(isPowerOfTwo(31) ? "Yes" : "No"); Console.WriteLine(isPowerOfTwo(64) ? "Yes" : "No"); }} // This code is contributed by devendra salunke
# Python program to check if given# number is power of 2 or not # Function to check if x is power of 2 def isPowerOfTwo(n): cnt = 0 while n > 0: if n & 1 == 1: cnt = cnt + 1 n = n >> 1 if cnt == 1 : return 1 return 0 # Driver codeif(isPowerOfTwo(31)): print('Yes')else: print('No') if(isPowerOfTwo(64)): print('Yes')else: print('No') # This code is contributed by devendra salunke
<script> // JavaScript code for the above approach // Function to check if x is power of 2 function isPowerofTwo(n) { let cnt = 0; while (n > 0) { if ((n & 1) == 1) { cnt++; // if n&1 == 1 keep incrementing cnt // variable } n = n >> 1; // keep dividing n by 2 using right // shift operator } if (cnt == 1) { // if cnt = 1 only then it is power of 2 return true; } return false; } // Driver code if (isPowerofTwo(30) == true) document.write("Yes" + "<br/>"); else document.write("No" + "<br/>"); if (isPowerofTwo(128) == true) document.write("Yes" + "<br/>"); else document.write("No" + "<br/>"); // This code is contributed by sanjoy_62. </script>
Output :
No
Yes
Time complexity : O(N)
Space Complexity : O(1)
5. If we subtract a power of 2 numbers by 1 then all unset bits after the only set bit become set; and the set bit becomes unset.For example for 4 ( 100) and 16(10000), we get the following after subtracting 1 3 β> 011 15 β> 01111
So, if a number n is a power of 2 then bitwise & of n and n-1 will be zero. We can say n is a power of 2 or not based on the value of n&(n-1). The expression n&(n-1) will not work when n is 0. To handle this case also, our expression will become n& (!n&(n-1)) (thanks to https://www.geeksforgeeks.org/program-to-find-whether-a-no-is-power-of-two/Mohammad for adding this case).
Below is the implementation of this method.
Time complexity : O(1)
Space complexity : O(1)
C++
C
Java
Python3
C#
PHP
Javascript
#include <bits/stdc++.h>using namespace std;#define bool int /* Function to check if x is power of 2*/bool isPowerOfTwo (int x){ /* First x in the below expression is for the case when x is 0 */ return x && (!(x&(x-1)));} /*Driver code*/int main(){ isPowerOfTwo(31)? cout<<"Yes\n": cout<<"No\n"; isPowerOfTwo(64)? cout<<"Yes\n": cout<<"No\n"; return 0;} // This code is contributed by rathbhupendra
#include<stdio.h>#define bool int /* Function to check if x is power of 2*/bool isPowerOfTwo (int x){ /* First x in the below expression is for the case when x is 0 */ return x && (!(x&(x-1)));} /*Driver program to test above function*/int main(){ isPowerOfTwo(31)? printf("Yes\n"): printf("No\n"); isPowerOfTwo(64)? printf("Yes\n"): printf("No\n"); return 0;}
// Java program to efficiently// check for power for 2 class Test{ /* Method to check if x is power of 2*/ static boolean isPowerOfTwo (int x) { /* First x in the below expression is for the case when x is 0 */ return x!=0 && ((x&(x-1)) == 0); } // Driver method public static void main(String[] args) { System.out.println(isPowerOfTwo(31) ? "Yes" : "No"); System.out.println(isPowerOfTwo(64) ? "Yes" : "No"); }}// This program is contributed by Gaurav Miglani
# Python program to check if given# number is power of 2 or not # Function to check if x is power of 2def isPowerOfTwo (x): # First x in the below expression # is for the case when x is 0 return (x and (not(x & (x - 1))) ) # Driver codeif(isPowerOfTwo(31)): print('Yes')else: print('No') if(isPowerOfTwo(64)): print('Yes')else: print('No') # This code is contributed by Danish Raza
// C# program to efficiently// check for power for 2using System; class GFG{ // Method to check if x is power of 2 static bool isPowerOfTwo (int x) { // First x in the below expression // is for the case when x is 0 return x != 0 && ((x & (x - 1)) == 0); } // Driver method public static void Main() { Console.WriteLine(isPowerOfTwo(31) ? "Yes" : "No"); Console.WriteLine(isPowerOfTwo(64) ? "Yes" : "No"); }} // This code is contributed by Sam007
<?php// PHP program to efficiently// check for power for 2 // Function to check if// x is power of 2function isPowerOfTwo ($x){// First x in the below expression// is for the case when x is 0return $x && (!($x & ($x - 1)));} // Driver Codeif(isPowerOfTwo(31)) echo "Yes\n" ;else echo "No\n"; if(isPowerOfTwo(64)) echo "Yes\n" ;else echo "No\n"; // This code is contributed by Sam007?>
<script> // JavaScript program to efficiently// check for power for 2 /* Method to check if x is power of 2*/ function isPowerOfTwo (x) { /* First x in the below expression is for the case when x is 0 */ return x!=0 && ((x&(x-1)) == 0); } // Driver methoddocument.write(isPowerOfTwo(31) ? "Yes" : "No");document.write("<br>"+(isPowerOfTwo(64) ? "Yes" : "No")); // This code is contributed by 29AjayKumar </script>
Output :
No
Yes
Time Complexity: O(1)
Auxiliary Space: O(1)
6. Another way is to use the logic to find the rightmost bit set of a given number.
C++
Java
Python3
C#
Javascript
#include <iostream>using namespace std; /* Function to check if x is power of 2*/bool isPowerofTwo(long long n){ if (n == 0) return 0; if ((n & (~(n - 1))) == n) return 1; return 0;}/*Driver code*/int main(){ isPowerofTwo(30) ? cout << "Yes\n" : cout << "No\n"; isPowerofTwo(128) ? cout << "Yes\n" : cout << "No\n"; return 0;}// This code is contributed by Sachin
// Java program of the above approachimport java.io.*; class GFG { // Function to check if x is power of 2 static boolean isPowerofTwo(int n) { if (n == 0) return false; if ((n & (~(n - 1))) == n) return true; return false; } public static void main(String[] args) { if (isPowerofTwo(30) == true) System.out.println("Yes"); else System.out.println("No"); if (isPowerofTwo(128) == true) System.out.println("Yes"); else System.out.println("No"); }} // This code is contributed by rajsanghavi9.
# Python program of the above approach # Function to check if x is power of 2*/def isPowerofTwo(n): if (n == 0): return 0 if ((n & (~(n - 1))) == n): return 1 return 0 # Driver codeif(isPowerofTwo(30)): print('Yes')else: print('No') if(isPowerofTwo(128)): print('Yes')else: print('No') # This code is contributed by shivanisinghss2110
// C# program of the above approach using System;public class GFG { // Function to check if x is power of 2 static bool isPowerofTwo(int n) { if (n == 0) return false; if ((n & (~(n - 1))) == n) return true; return false; } public static void Main(String[] args) { if (isPowerofTwo(30) == true) Console.WriteLine("Yes"); else Console.WriteLine("No"); if (isPowerofTwo(128) == true) Console.WriteLine("Yes"); else Console.WriteLine("No"); }} // This code contributed by gauravrajput1
<script>// javascript program of the above approach // Function to check if x is power of 2 function isPowerofTwo(n) { if (n == 0) return false; if ((n & (~(n - 1))) == n) return true; return false; } if (isPowerofTwo(30) == true) document.write("Yes<br/>"); else document.write("No<br/>"); if (isPowerofTwo(128) == true) document.write("Yes<br/>"); else document.write("No<br/>"); // This code is contributed by umadevi9616</script>
No
Yes
Time complexity : O(1)
Space complexity : O(1)
7. Brian Kernighanβs algorithm ( Efficient Method )
Approach :
As we know that the number which will be the power of two have only one set bit , therefore when we do bitwise and with the number which is just less than the number which can be represented as the power of (2) then the result will be 0 .
Example : 4 can be represented as (2^2 ) ,
(4 & 3)=0 or in binary (100 & 011=0)
Here is the code of the given approach :
C++
Java
C#
// C++ program to check whether the given number is power of// 2#include <iostream>using namespace std;/* Function to check if x is power of 2*/bool isPowerofTwo(long long n){ return (n != 0) && ((n & (n - 1)) == 0);}/*Driver code*/int main(){ isPowerofTwo(30) ? cout << "Yes\n" : cout << "No\n"; isPowerofTwo(128) ? cout << "Yes\n" : cout << "No\n"; return 0;}// This code is contributed by Suruchi Kumari
/*package whatever //do not write package name here */import java.io.*;class GFG { /* Function to check if x is power of 2*/ public static boolean isPowerofTwo(long n) { return (n != 0) && ((n & (n - 1)) == 0); } public static void main (String[] args) { if(isPowerofTwo(30)) { System.out.println("Yes"); } else { System.out.println("No"); } if(isPowerofTwo(128)) { System.out.println("Yes"); } else { System.out.println("No"); } }} // This code is contributed by akashish__
using System; public class GFG{ /* Function to check if x is power of 2*/ static public bool isPowerofTwo(ulong n){ return (n != 0) && ((n & (n - 1)) == 0);} static public void Main (){ if(isPowerofTwo(30)) { System.Console.WriteLine("Yes"); } else { System.Console.WriteLine("No"); } if(isPowerofTwo(128)) { System.Console.WriteLine("Yes"); } else { System.Console.WriteLine("No"); } }} // This code is contributed by akashish__
Output :
No
Yes
Time Complexity : O(1)
Auxiliary Space: O(1)
Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
Sam007
Mithun Kumar
Akanksha_Rai
SURENDRA_GANGWAR
SumitJadiya
rathbhupendra
rowdyninja
rutvik_56
princi singh
rishavmahato348
ujjwalmittal
divyeshrabadiya07
shikhasingrajput
29AjayKumar
skm22
amvsachin
rajsanghavi9
GauravRajput1
umadevi9616
subhammahato348
shivanisinghss2110
swayambhusamiksha1
dark_hunter
amartyaghoshgfg
devendrasalunke
suruchikumarimfp4
varshagumber28
akashish__
sanjoy_62
FactSet
Qualcomm
Samsung
SAP Labs
Bit Magic
Mathematical
Samsung
FactSet
SAP Labs
Qualcomm
Mathematical
Bit Magic
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 52,
"s": 24,
"text": "\n11 Jul, 2022"
},
{
"code": null,
"e": 146,
"s": 52,
"text": "Given a positive integer, write a function to find if it is a power of two or not.Examples : "
},
{
"code": null,
"e": 244,
"s": 146,
"text": "Input : n = 4\nOutput : Yes\n22 = 4\n\nInput : n = 7\nOutput : No\n\nInput : n = 32\nOutput : Yes\n25 = 32"
},
{
"code": null,
"e": 382,
"s": 244,
"text": "1. A simple method for this is to simply take the log of the number on base 2 and if you get an integer then the number is the power of 2"
},
{
"code": null,
"e": 386,
"s": 382,
"text": "C++"
},
{
"code": null,
"e": 388,
"s": 386,
"text": "C"
},
{
"code": null,
"e": 393,
"s": 388,
"text": "Java"
},
{
"code": null,
"e": 401,
"s": 393,
"text": "Python3"
},
{
"code": null,
"e": 404,
"s": 401,
"text": "C#"
},
{
"code": null,
"e": 408,
"s": 404,
"text": "PHP"
},
{
"code": null,
"e": 419,
"s": 408,
"text": "Javascript"
},
{
"code": "// C++ Program to find whether a// no is power of two#include<bits/stdc++.h>using namespace std; // Function to check if x is power of 2bool isPowerOfTwo(int n){ if(n==0) return false; return (ceil(log2(n)) == floor(log2(n)));} // Driver programint main(){ isPowerOfTwo(31)? cout<<\"Yes\"<<endl: cout<<\"No\"<<endl; isPowerOfTwo(64)? cout<<\"Yes\"<<endl: cout<<\"No\"<<endl; return 0;} // This code is contributed by Surendra_Gangwar",
"e": 862,
"s": 419,
"text": null
},
{
"code": "// C Program to find whether a// no is power of two#include<stdio.h>#include<stdbool.h>#include<math.h> /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ if(n==0) return false; return (ceil(log2(n)) == floor(log2(n)));} // Driver programint main(){ isPowerOfTwo(31)? printf(\"Yes\\n\"): printf(\"No\\n\"); isPowerOfTwo(64)? printf(\"Yes\\n\"): printf(\"No\\n\"); return 0;} // This code is contributed by bibhudhendra",
"e": 1301,
"s": 862,
"text": null
},
{
"code": "// Java Program to find whether a// no is power of twoclass GFG{/* Function to check if x is power of 2*/static boolean isPowerOfTwo(int n){ if(n==0) return false; return (int)(Math.ceil((Math.log(n) / Math.log(2)))) == (int)(Math.floor(((Math.log(n) / Math.log(2)))));} // Driver Codepublic static void main(String[] args){ if(isPowerOfTwo(31)) System.out.println(\"Yes\"); else System.out.println(\"No\"); if(isPowerOfTwo(64)) System.out.println(\"Yes\"); else System.out.println(\"No\");}} // This code is contributed by mits",
"e": 1863,
"s": 1301,
"text": null
},
{
"code": "# Python3 Program to find# whether a no is# power of twoimport math # Function to check# Log base 2def Log2(x): if x == 0: return false; return (math.log10(x) / math.log10(2)); # Function to check# if x is power of 2def isPowerOfTwo(n): return (math.ceil(Log2(n)) == math.floor(Log2(n))); # Driver Codeif(isPowerOfTwo(31)): print(\"Yes\");else: print(\"No\"); if(isPowerOfTwo(64)): print(\"Yes\");else: print(\"No\"); # This code is contributed# by mits",
"e": 2364,
"s": 1863,
"text": null
},
{
"code": "// C# Program to find whether// a no is power of twousing System; class GFG{ /* Function to check if x is power of 2*/static bool isPowerOfTwo(int n){ if(n==0) return false; return (int)(Math.Ceiling((Math.Log(n) / Math.Log(2)))) == (int)(Math.Floor(((Math.Log(n) / Math.Log(2)))));} // Driver Codepublic static void Main(){ if(isPowerOfTwo(31)) Console.WriteLine(\"Yes\"); else Console.WriteLine(\"No\"); if(isPowerOfTwo(64)) Console.WriteLine(\"Yes\"); else Console.WriteLine(\"No\");}} // This code is contributed// by Akanksha Rai(Abby_akku)",
"e": 3034,
"s": 2364,
"text": null
},
{
"code": "<?php// PHP Program to find// whether a no is// power of two // Function to check// Log base 2function Log2($x){ return (log10($x) / log10(2));} // Function to check// if x is power of 2function isPowerOfTwo($n){ return (ceil(Log2($n)) == floor(Log2($n)));} // Driver Codeif(isPowerOfTwo(31))echo \"Yes\\n\";elseecho \"No\\n\"; if(isPowerOfTwo(64))echo \"Yes\\n\";elseecho \"No\\n\"; // This code is contributed// by Sam007?>",
"e": 3481,
"s": 3034,
"text": null
},
{
"code": "<script>// javascript Program to find whether a// no is power of two /* Function to check if x is power of 2 */ function isPowerOfTwo(n) { if (n == 0) return false; return parseInt( (Math.ceil((Math.log(n) / Math.log(2))))) == parseInt( (Math.floor(((Math.log(n) / Math.log(2)))))); } // Driver Code if (isPowerOfTwo(31)) document.write(\"Yes<br/>\"); else document.write(\"No<br/>\"); if (isPowerOfTwo(64)) document.write(\"Yes<br/>\"); else document.write(\"No<br/>\"); // This code is contributed by shikhasingrajput.</script>",
"e": 4092,
"s": 3481,
"text": null
},
{
"code": null,
"e": 4102,
"s": 4092,
"text": "Output: "
},
{
"code": null,
"e": 4109,
"s": 4102,
"text": "No\nYes"
},
{
"code": null,
"e": 4152,
"s": 4109,
"text": "Time Complexity: O(1)Auxiliary Space: O(1)"
},
{
"code": null,
"e": 4366,
"s": 4152,
"text": "2. Another solution is to keep dividing the number by two, i.e, do n = n/2 iteratively. In any iteration, if n%2 becomes non-zero and n is not 1 then n is not a power of 2. If n becomes 1 then it is a power of 2. "
},
{
"code": null,
"e": 4370,
"s": 4366,
"text": "C++"
},
{
"code": null,
"e": 4372,
"s": 4370,
"text": "C"
},
{
"code": null,
"e": 4377,
"s": 4372,
"text": "Java"
},
{
"code": null,
"e": 4385,
"s": 4377,
"text": "Python3"
},
{
"code": null,
"e": 4388,
"s": 4385,
"text": "C#"
},
{
"code": null,
"e": 4392,
"s": 4388,
"text": "PHP"
},
{
"code": null,
"e": 4403,
"s": 4392,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std; /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ if (n == 0) return 0; while (n != 1) { if (n%2 != 0) return 0; n = n/2; } return 1;} /*Driver code*/int main(){ isPowerOfTwo(31)? cout<<\"Yes\\n\": cout<<\"No\\n\"; isPowerOfTwo(64)? cout<<\"Yes\\n\": cout<<\"No\\n\"; return 0;} // This code is contributed by rathbhupendra",
"e": 4833,
"s": 4403,
"text": null
},
{
"code": "#include<stdio.h>#include<stdbool.h> /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ if (n == 0) return 0; while (n != 1) { if (n%2 != 0) return 0; n = n/2; } return 1;} /*Driver program to test above function*/int main(){ isPowerOfTwo(31)? printf(\"Yes\\n\"): printf(\"No\\n\"); isPowerOfTwo(64)? printf(\"Yes\\n\"): printf(\"No\\n\"); return 0;}",
"e": 5217,
"s": 4833,
"text": null
},
{
"code": "// Java program to find whether// a no is power of twoimport java.io.*; class GFG { // Function to check if // x is power of 2 static boolean isPowerOfTwo(int n) { if (n == 0) return false; while (n != 1) { if (n % 2 != 0) return false; n = n / 2; } return true; } // Driver program public static void main(String args[]) { if (isPowerOfTwo(31)) System.out.println(\"Yes\"); else System.out.println(\"No\"); if (isPowerOfTwo(64)) System.out.println(\"Yes\"); else System.out.println(\"No\"); }} // This code is contributed by Nikita tiwari.",
"e": 5944,
"s": 5217,
"text": null
},
{
"code": "# Python program to check if given# number is power of 2 or not # Function to check if x is power of 2def isPowerOfTwo(n): if (n == 0): return False while (n != 1): if (n % 2 != 0): return False n = n // 2 return True # Driver codeif(isPowerOfTwo(31)): print('Yes')else: print('No')if(isPowerOfTwo(64)): print('Yes')else: print('No') # This code is contributed by Danish Raza",
"e": 6398,
"s": 5944,
"text": null
},
{
"code": "// C# program to find whether// a no is power of twousing System; class GFG{ // Function to check if // x is power of 2 static bool isPowerOfTwo(int n) { if (n == 0) return false; while (n != 1) { if (n % 2 != 0) return false; n = n / 2; } return true; } // Driver program public static void Main() { Console.WriteLine(isPowerOfTwo(31) ? \"Yes\" : \"No\"); Console.WriteLine(isPowerOfTwo(64) ? \"Yes\" : \"No\"); }} // This code is contributed by Sam007",
"e": 6994,
"s": 6398,
"text": null
},
{
"code": "<?php // Function to check if// x is power of 2function isPowerOfTwo($n){if ($n == 0) return 0;while ($n != 1){ if ($n % 2 != 0) return 0; $n = $n / 2;}return 1;} // Driver Codeif(isPowerOfTwo(31)) echo \"Yes\\n\";else echo \"No\\n\"; if(isPowerOfTwo(64)) echo \"Yes\\n\";else echo \"No\\n\"; // This code is contributed// by Sam007?>",
"e": 7345,
"s": 6994,
"text": null
},
{
"code": "<script> /* Function to check if x is power of 2*/ function isPowerOfTwo(n) { if (n == 0) return 0; while (n != 1) { if (n%2 != 0) return 0; n = n/2; } return 1; } isPowerOfTwo(31)? document.write(\"Yes\" + \"</br>\"): document.write(\"No\" + \"</br>\"); isPowerOfTwo(64)? document.write(\"Yes\"): document.write(\"No\"); </script>",
"e": 7773,
"s": 7345,
"text": null
},
{
"code": null,
"e": 7784,
"s": 7773,
"text": "Output : "
},
{
"code": null,
"e": 7791,
"s": 7784,
"text": "No\nYes"
},
{
"code": null,
"e": 7817,
"s": 7791,
"text": "Time Complexity: O(log2n)"
},
{
"code": null,
"e": 7839,
"s": 7817,
"text": "Auxiliary Space: O(1)"
},
{
"code": null,
"e": 7993,
"s": 7839,
"text": "3. Another way is to use this simple recursive solution. It uses the same logic as the above iterative solution but uses recursion instead of iteration. "
},
{
"code": null,
"e": 7997,
"s": 7993,
"text": "C++"
},
{
"code": null,
"e": 8002,
"s": 7997,
"text": "Java"
},
{
"code": null,
"e": 8010,
"s": 8002,
"text": "Python3"
},
{
"code": null,
"e": 8013,
"s": 8010,
"text": "C#"
},
{
"code": null,
"e": 8024,
"s": 8013,
"text": "Javascript"
},
{
"code": "// C++ program for above approach#include <bits/stdc++.h>using namespace std; // Function which checks whether a// number is a power of 2bool powerOf2(int n){ // base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // all other odd numbers are not powers of 2 else if (n % 2 != 0 || n ==0) return false; // recursive function call return powerOf2(n / 2);} // Driver Codeint main(){ int n = 64;//True int m = 12;//False if (powerOf2(n) == 1) cout << \"True\" << endl; else cout << \"False\" << endl; if (powerOf2(m) == 1) cout << \"True\" << endl; else cout << \"False\" << endl;} //code contributed by Moukthik a.k.a rowdyninja",
"e": 8768,
"s": 8024,
"text": null
},
{
"code": "// Java program for// the above approachimport java.util.*;class GFG{ // Function which checks// whether a number is a// power of 2static boolean powerOf2(int n){ // base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // all other odd numbers are // not powers of 2 else if (n % 2 != 0 || n ==0) return false; // recursive function call return powerOf2(n / 2);} // Driver Codepublic static void main(String[] args){ //True int n = 64; //False int m = 12; if (powerOf2(n) == true) System.out.print(\"True\" + \"\\n\"); else System.out.print(\"False\" + \"\\n\"); if (powerOf2(m) == true) System.out.print(\"True\" + \"\\n\"); else System.out.print(\"False\" + \"\\n\");}} // This code is contributed by Princi Singh",
"e": 9556,
"s": 8768,
"text": null
},
{
"code": "# Python program for above approach # function which checks whether a# number is a power of 2def powerof2(n): # base cases # '1' is the only odd number # which is a power of 2(2^0) if n == 1: return True # all other odd numbers are not powers of 2 elif n%2 != 0 or n == 0: return False #recursive function call return powerof2(n/2) # Driver Codeif __name__ == \"__main__\": print(powerof2(64)) #True print(powerof2(12)) #False #code contributed by Moukthik a.k.a rowdyninja",
"e": 10088,
"s": 9556,
"text": null
},
{
"code": "// C# program for above approachusing System; class GFG{ // Function which checks whether a// number is a power of 2static bool powerOf2(int n){ // Base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // All other odd numbers // are not powers of 2 else if (n % 2 != 0 || n == 0) return false; // Recursive function call return powerOf2(n / 2);} // Driver codestatic void Main(){ int n = 64;//True int m = 12;//False if (powerOf2(n)) { Console.Write(\"True\" + \"\\n\"); } else { Console.Write(\"False\" + \"\\n\"); } if (powerOf2(m)) { Console.Write(\"True\"); } else { Console.Write(\"False\"); }}} // This code is contributed by rutvik_56",
"e": 10900,
"s": 10088,
"text": null
},
{
"code": "<script> // javascript program for// the above approach // Function which checks// whether a number is a// power of 2 function powerOf2(n){ // base cases // '1' is the only odd number // which is a power of 2(2^0) if (n == 1) return true; // all other odd numbers are // not powers of 2 else if (n % 2 != 0 || n ==0) return false; // recursive function call return powerOf2(n / 2);} // Driver Code//Truevar n = 64; //Falsevar m = 12; if (powerOf2(n) == true) document.write(\"True\" + \"\\n\");else document.write(\"False\" + \"\\n\"); if (powerOf2(m) == true) document.write(\"True\" + \"\\n\");else document.write(\"False\" + \"\\n\"); // This code contributed by shikhasingrajput </script>",
"e": 11605,
"s": 10900,
"text": null
},
{
"code": null,
"e": 11616,
"s": 11605,
"text": "True\nFalse"
},
{
"code": null,
"e": 11642,
"s": 11616,
"text": "Time Complexity: O(log2n)"
},
{
"code": null,
"e": 11668,
"s": 11642,
"text": "Auxiliary Space: O(log2n)"
},
{
"code": null,
"e": 11862,
"s": 11668,
"text": "4. All power of two numbers has only a one-bit set. So count the no. of set bits and if you get 1 then the number is a power of 2. Please see Count set bits in an integer for counting set bits."
},
{
"code": null,
"e": 11866,
"s": 11862,
"text": "C++"
},
{
"code": null,
"e": 11871,
"s": 11866,
"text": "Java"
},
{
"code": null,
"e": 11874,
"s": 11871,
"text": "C#"
},
{
"code": null,
"e": 11882,
"s": 11874,
"text": "Python3"
},
{
"code": null,
"e": 11893,
"s": 11882,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std;#define bool int /* Function to check if x is power of 2*/bool isPowerOfTwo(int n){ /* First x in the below expression is for the case when * x is 0 */ int cnt = 0; while (n > 0) { if ((n & 1) == 1) { cnt++; } n = n >> 1;// keep dividing n by 2 using right // shift operator } if (cnt == 1) {// if cnt = 1 only then it is power of 2 return true; } return false;} /*Driver code*/int main(){ isPowerOfTwo(31) ? cout << \"Yes\\n\" : cout << \"No\\n\"; isPowerOfTwo(64) ? cout << \"Yes\\n\" : cout << \"No\\n\"; return 0;} // This code is contributed by devendra salunke",
"e": 12589,
"s": 11893,
"text": null
},
{
"code": "// Java program of the above approachimport java.io.*; class GFG { // Function to check if x is power of 2 static boolean isPowerofTwo(int n) { int cnt = 0; while (n > 0) { if ((n & 1) == 1) { cnt++; // if n&1 == 1 keep incrementing cnt // variable } n = n >> 1; // keep dividing n by 2 using right // shift operator } if (cnt == 1) { // if cnt = 1 only then it is power of 2 return true; } return false; } public static void main(String[] args) { if (isPowerofTwo(30) == true) System.out.println(\"Yes\"); else System.out.println(\"No\"); if (isPowerofTwo(128) == true) System.out.println(\"Yes\"); else System.out.println(\"No\"); }} // This code is contributed by devendra salunke.",
"e": 13511,
"s": 12589,
"text": null
},
{
"code": "// C# program to check for power for 2using System; class GFG { // Method to check if x is power of 2 static bool isPowerOfTwo(int n) { int cnt = 0; // initialize count to 0 while (n > 0) { // run loop till n > 0 if ((n & 1) == 1) { // if n&1 == 1 keep incrementing cnt // variable cnt++; } n = n >> 1; // keep dividing n by 2 using right // shift operator } if (cnt == 1) // if cnt = 1 only then it is power of 2 return true; return false; } // Driver method public static void Main() { Console.WriteLine(isPowerOfTwo(31) ? \"Yes\" : \"No\"); Console.WriteLine(isPowerOfTwo(64) ? \"Yes\" : \"No\"); }} // This code is contributed by devendra salunke",
"e": 14352,
"s": 13511,
"text": null
},
{
"code": "# Python program to check if given# number is power of 2 or not # Function to check if x is power of 2 def isPowerOfTwo(n): cnt = 0 while n > 0: if n & 1 == 1: cnt = cnt + 1 n = n >> 1 if cnt == 1 : return 1 return 0 # Driver codeif(isPowerOfTwo(31)): print('Yes')else: print('No') if(isPowerOfTwo(64)): print('Yes')else: print('No') # This code is contributed by devendra salunke",
"e": 14795,
"s": 14352,
"text": null
},
{
"code": "<script> // JavaScript code for the above approach // Function to check if x is power of 2 function isPowerofTwo(n) { let cnt = 0; while (n > 0) { if ((n & 1) == 1) { cnt++; // if n&1 == 1 keep incrementing cnt // variable } n = n >> 1; // keep dividing n by 2 using right // shift operator } if (cnt == 1) { // if cnt = 1 only then it is power of 2 return true; } return false; } // Driver code if (isPowerofTwo(30) == true) document.write(\"Yes\" + \"<br/>\"); else document.write(\"No\" + \"<br/>\"); if (isPowerofTwo(128) == true) document.write(\"Yes\" + \"<br/>\"); else document.write(\"No\" + \"<br/>\"); // This code is contributed by sanjoy_62. </script>",
"e": 15709,
"s": 14795,
"text": null
},
{
"code": null,
"e": 15719,
"s": 15709,
"text": "Output : "
},
{
"code": null,
"e": 15726,
"s": 15719,
"text": "No\nYes"
},
{
"code": null,
"e": 15749,
"s": 15726,
"text": "Time complexity : O(N)"
},
{
"code": null,
"e": 15773,
"s": 15749,
"text": "Space Complexity : O(1)"
},
{
"code": null,
"e": 16004,
"s": 15773,
"text": "5. If we subtract a power of 2 numbers by 1 then all unset bits after the only set bit become set; and the set bit becomes unset.For example for 4 ( 100) and 16(10000), we get the following after subtracting 1 3 β> 011 15 β> 01111"
},
{
"code": null,
"e": 16383,
"s": 16004,
"text": "So, if a number n is a power of 2 then bitwise & of n and n-1 will be zero. We can say n is a power of 2 or not based on the value of n&(n-1). The expression n&(n-1) will not work when n is 0. To handle this case also, our expression will become n& (!n&(n-1)) (thanks to https://www.geeksforgeeks.org/program-to-find-whether-a-no-is-power-of-two/Mohammad for adding this case). "
},
{
"code": null,
"e": 16429,
"s": 16383,
"text": "Below is the implementation of this method. "
},
{
"code": null,
"e": 16452,
"s": 16429,
"text": "Time complexity : O(1)"
},
{
"code": null,
"e": 16477,
"s": 16452,
"text": "Space complexity : O(1) "
},
{
"code": null,
"e": 16481,
"s": 16477,
"text": "C++"
},
{
"code": null,
"e": 16483,
"s": 16481,
"text": "C"
},
{
"code": null,
"e": 16488,
"s": 16483,
"text": "Java"
},
{
"code": null,
"e": 16496,
"s": 16488,
"text": "Python3"
},
{
"code": null,
"e": 16499,
"s": 16496,
"text": "C#"
},
{
"code": null,
"e": 16503,
"s": 16499,
"text": "PHP"
},
{
"code": null,
"e": 16514,
"s": 16503,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std;#define bool int /* Function to check if x is power of 2*/bool isPowerOfTwo (int x){ /* First x in the below expression is for the case when x is 0 */ return x && (!(x&(x-1)));} /*Driver code*/int main(){ isPowerOfTwo(31)? cout<<\"Yes\\n\": cout<<\"No\\n\"; isPowerOfTwo(64)? cout<<\"Yes\\n\": cout<<\"No\\n\"; return 0;} // This code is contributed by rathbhupendra",
"e": 16928,
"s": 16514,
"text": null
},
{
"code": "#include<stdio.h>#define bool int /* Function to check if x is power of 2*/bool isPowerOfTwo (int x){ /* First x in the below expression is for the case when x is 0 */ return x && (!(x&(x-1)));} /*Driver program to test above function*/int main(){ isPowerOfTwo(31)? printf(\"Yes\\n\"): printf(\"No\\n\"); isPowerOfTwo(64)? printf(\"Yes\\n\"): printf(\"No\\n\"); return 0;}",
"e": 17294,
"s": 16928,
"text": null
},
{
"code": "// Java program to efficiently// check for power for 2 class Test{ /* Method to check if x is power of 2*/ static boolean isPowerOfTwo (int x) { /* First x in the below expression is for the case when x is 0 */ return x!=0 && ((x&(x-1)) == 0); } // Driver method public static void main(String[] args) { System.out.println(isPowerOfTwo(31) ? \"Yes\" : \"No\"); System.out.println(isPowerOfTwo(64) ? \"Yes\" : \"No\"); }}// This program is contributed by Gaurav Miglani ",
"e": 17840,
"s": 17294,
"text": null
},
{
"code": "# Python program to check if given# number is power of 2 or not # Function to check if x is power of 2def isPowerOfTwo (x): # First x in the below expression # is for the case when x is 0 return (x and (not(x & (x - 1))) ) # Driver codeif(isPowerOfTwo(31)): print('Yes')else: print('No') if(isPowerOfTwo(64)): print('Yes')else: print('No') # This code is contributed by Danish Raza ",
"e": 18255,
"s": 17840,
"text": null
},
{
"code": "// C# program to efficiently// check for power for 2using System; class GFG{ // Method to check if x is power of 2 static bool isPowerOfTwo (int x) { // First x in the below expression // is for the case when x is 0 return x != 0 && ((x & (x - 1)) == 0); } // Driver method public static void Main() { Console.WriteLine(isPowerOfTwo(31) ? \"Yes\" : \"No\"); Console.WriteLine(isPowerOfTwo(64) ? \"Yes\" : \"No\"); }} // This code is contributed by Sam007",
"e": 18785,
"s": 18255,
"text": null
},
{
"code": "<?php// PHP program to efficiently// check for power for 2 // Function to check if// x is power of 2function isPowerOfTwo ($x){// First x in the below expression// is for the case when x is 0return $x && (!($x & ($x - 1)));} // Driver Codeif(isPowerOfTwo(31)) echo \"Yes\\n\" ;else echo \"No\\n\"; if(isPowerOfTwo(64)) echo \"Yes\\n\" ;else echo \"No\\n\"; // This code is contributed by Sam007?>",
"e": 19190,
"s": 18785,
"text": null
},
{
"code": "<script> // JavaScript program to efficiently// check for power for 2 /* Method to check if x is power of 2*/ function isPowerOfTwo (x) { /* First x in the below expression is for the case when x is 0 */ return x!=0 && ((x&(x-1)) == 0); } // Driver methoddocument.write(isPowerOfTwo(31) ? \"Yes\" : \"No\");document.write(\"<br>\"+(isPowerOfTwo(64) ? \"Yes\" : \"No\")); // This code is contributed by 29AjayKumar </script>",
"e": 19647,
"s": 19190,
"text": null
},
{
"code": null,
"e": 19658,
"s": 19647,
"text": "Output : "
},
{
"code": null,
"e": 19665,
"s": 19658,
"text": "No\nYes"
},
{
"code": null,
"e": 19687,
"s": 19665,
"text": "Time Complexity: O(1)"
},
{
"code": null,
"e": 19709,
"s": 19687,
"text": "Auxiliary Space: O(1)"
},
{
"code": null,
"e": 19793,
"s": 19709,
"text": "6. Another way is to use the logic to find the rightmost bit set of a given number."
},
{
"code": null,
"e": 19797,
"s": 19793,
"text": "C++"
},
{
"code": null,
"e": 19802,
"s": 19797,
"text": "Java"
},
{
"code": null,
"e": 19810,
"s": 19802,
"text": "Python3"
},
{
"code": null,
"e": 19813,
"s": 19810,
"text": "C#"
},
{
"code": null,
"e": 19824,
"s": 19813,
"text": "Javascript"
},
{
"code": "#include <iostream>using namespace std; /* Function to check if x is power of 2*/bool isPowerofTwo(long long n){ if (n == 0) return 0; if ((n & (~(n - 1))) == n) return 1; return 0;}/*Driver code*/int main(){ isPowerofTwo(30) ? cout << \"Yes\\n\" : cout << \"No\\n\"; isPowerofTwo(128) ? cout << \"Yes\\n\" : cout << \"No\\n\"; return 0;}// This code is contributed by Sachin",
"e": 20221,
"s": 19824,
"text": null
},
{
"code": "// Java program of the above approachimport java.io.*; class GFG { // Function to check if x is power of 2 static boolean isPowerofTwo(int n) { if (n == 0) return false; if ((n & (~(n - 1))) == n) return true; return false; } public static void main(String[] args) { if (isPowerofTwo(30) == true) System.out.println(\"Yes\"); else System.out.println(\"No\"); if (isPowerofTwo(128) == true) System.out.println(\"Yes\"); else System.out.println(\"No\"); }} // This code is contributed by rajsanghavi9.",
"e": 20878,
"s": 20221,
"text": null
},
{
"code": "# Python program of the above approach # Function to check if x is power of 2*/def isPowerofTwo(n): if (n == 0): return 0 if ((n & (~(n - 1))) == n): return 1 return 0 # Driver codeif(isPowerofTwo(30)): print('Yes')else: print('No') if(isPowerofTwo(128)): print('Yes')else: print('No') # This code is contributed by shivanisinghss2110",
"e": 21257,
"s": 20878,
"text": null
},
{
"code": "// C# program of the above approach using System;public class GFG { // Function to check if x is power of 2 static bool isPowerofTwo(int n) { if (n == 0) return false; if ((n & (~(n - 1))) == n) return true; return false; } public static void Main(String[] args) { if (isPowerofTwo(30) == true) Console.WriteLine(\"Yes\"); else Console.WriteLine(\"No\"); if (isPowerofTwo(128) == true) Console.WriteLine(\"Yes\"); else Console.WriteLine(\"No\"); }} // This code contributed by gauravrajput1",
"e": 21906,
"s": 21257,
"text": null
},
{
"code": "<script>// javascript program of the above approach // Function to check if x is power of 2 function isPowerofTwo(n) { if (n == 0) return false; if ((n & (~(n - 1))) == n) return true; return false; } if (isPowerofTwo(30) == true) document.write(\"Yes<br/>\"); else document.write(\"No<br/>\"); if (isPowerofTwo(128) == true) document.write(\"Yes<br/>\"); else document.write(\"No<br/>\"); // This code is contributed by umadevi9616</script>",
"e": 22495,
"s": 21906,
"text": null
},
{
"code": null,
"e": 22502,
"s": 22495,
"text": "No\nYes"
},
{
"code": null,
"e": 22525,
"s": 22502,
"text": "Time complexity : O(1)"
},
{
"code": null,
"e": 22550,
"s": 22525,
"text": "Space complexity : O(1) "
},
{
"code": null,
"e": 22603,
"s": 22550,
"text": "7. Brian Kernighanβs algorithm ( Efficient Method )"
},
{
"code": null,
"e": 22614,
"s": 22603,
"text": "Approach :"
},
{
"code": null,
"e": 22854,
"s": 22614,
"text": "As we know that the number which will be the power of two have only one set bit , therefore when we do bitwise and with the number which is just less than the number which can be represented as the power of (2) then the result will be 0 . "
},
{
"code": null,
"e": 22898,
"s": 22854,
"text": "Example : 4 can be represented as (2^2 ) , "
},
{
"code": null,
"e": 22949,
"s": 22898,
"text": " (4 & 3)=0 or in binary (100 & 011=0)"
},
{
"code": null,
"e": 22990,
"s": 22949,
"text": "Here is the code of the given approach :"
},
{
"code": null,
"e": 22994,
"s": 22990,
"text": "C++"
},
{
"code": null,
"e": 22999,
"s": 22994,
"text": "Java"
},
{
"code": null,
"e": 23002,
"s": 22999,
"text": "C#"
},
{
"code": "// C++ program to check whether the given number is power of// 2#include <iostream>using namespace std;/* Function to check if x is power of 2*/bool isPowerofTwo(long long n){ return (n != 0) && ((n & (n - 1)) == 0);}/*Driver code*/int main(){ isPowerofTwo(30) ? cout << \"Yes\\n\" : cout << \"No\\n\"; isPowerofTwo(128) ? cout << \"Yes\\n\" : cout << \"No\\n\"; return 0;}// This code is contributed by Suruchi Kumari",
"e": 23421,
"s": 23002,
"text": null
},
{
"code": "/*package whatever //do not write package name here */import java.io.*;class GFG { /* Function to check if x is power of 2*/ public static boolean isPowerofTwo(long n) { return (n != 0) && ((n & (n - 1)) == 0); } public static void main (String[] args) { if(isPowerofTwo(30)) { System.out.println(\"Yes\"); } else { System.out.println(\"No\"); } if(isPowerofTwo(128)) { System.out.println(\"Yes\"); } else { System.out.println(\"No\"); } }} // This code is contributed by akashish__",
"e": 23964,
"s": 23421,
"text": null
},
{
"code": "using System; public class GFG{ /* Function to check if x is power of 2*/ static public bool isPowerofTwo(ulong n){ return (n != 0) && ((n & (n - 1)) == 0);} static public void Main (){ if(isPowerofTwo(30)) { System.Console.WriteLine(\"Yes\"); } else { System.Console.WriteLine(\"No\"); } if(isPowerofTwo(128)) { System.Console.WriteLine(\"Yes\"); } else { System.Console.WriteLine(\"No\"); } }} // This code is contributed by akashish__",
"e": 24518,
"s": 23964,
"text": null
},
{
"code": null,
"e": 24527,
"s": 24518,
"text": "Output :"
},
{
"code": null,
"e": 24534,
"s": 24527,
"text": "No\nYes"
},
{
"code": null,
"e": 24558,
"s": 24534,
"text": "Time Complexity : O(1) "
},
{
"code": null,
"e": 24580,
"s": 24558,
"text": "Auxiliary Space: O(1)"
},
{
"code": null,
"e": 24706,
"s": 24580,
"text": "Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. "
},
{
"code": null,
"e": 24713,
"s": 24706,
"text": "Sam007"
},
{
"code": null,
"e": 24726,
"s": 24713,
"text": "Mithun Kumar"
},
{
"code": null,
"e": 24739,
"s": 24726,
"text": "Akanksha_Rai"
},
{
"code": null,
"e": 24756,
"s": 24739,
"text": "SURENDRA_GANGWAR"
},
{
"code": null,
"e": 24768,
"s": 24756,
"text": "SumitJadiya"
},
{
"code": null,
"e": 24782,
"s": 24768,
"text": "rathbhupendra"
},
{
"code": null,
"e": 24793,
"s": 24782,
"text": "rowdyninja"
},
{
"code": null,
"e": 24803,
"s": 24793,
"text": "rutvik_56"
},
{
"code": null,
"e": 24816,
"s": 24803,
"text": "princi singh"
},
{
"code": null,
"e": 24832,
"s": 24816,
"text": "rishavmahato348"
},
{
"code": null,
"e": 24845,
"s": 24832,
"text": "ujjwalmittal"
},
{
"code": null,
"e": 24863,
"s": 24845,
"text": "divyeshrabadiya07"
},
{
"code": null,
"e": 24880,
"s": 24863,
"text": "shikhasingrajput"
},
{
"code": null,
"e": 24892,
"s": 24880,
"text": "29AjayKumar"
},
{
"code": null,
"e": 24898,
"s": 24892,
"text": "skm22"
},
{
"code": null,
"e": 24908,
"s": 24898,
"text": "amvsachin"
},
{
"code": null,
"e": 24921,
"s": 24908,
"text": "rajsanghavi9"
},
{
"code": null,
"e": 24935,
"s": 24921,
"text": "GauravRajput1"
},
{
"code": null,
"e": 24947,
"s": 24935,
"text": "umadevi9616"
},
{
"code": null,
"e": 24963,
"s": 24947,
"text": "subhammahato348"
},
{
"code": null,
"e": 24982,
"s": 24963,
"text": "shivanisinghss2110"
},
{
"code": null,
"e": 25001,
"s": 24982,
"text": "swayambhusamiksha1"
},
{
"code": null,
"e": 25013,
"s": 25001,
"text": "dark_hunter"
},
{
"code": null,
"e": 25029,
"s": 25013,
"text": "amartyaghoshgfg"
},
{
"code": null,
"e": 25045,
"s": 25029,
"text": "devendrasalunke"
},
{
"code": null,
"e": 25063,
"s": 25045,
"text": "suruchikumarimfp4"
},
{
"code": null,
"e": 25078,
"s": 25063,
"text": "varshagumber28"
},
{
"code": null,
"e": 25089,
"s": 25078,
"text": "akashish__"
},
{
"code": null,
"e": 25099,
"s": 25089,
"text": "sanjoy_62"
},
{
"code": null,
"e": 25107,
"s": 25099,
"text": "FactSet"
},
{
"code": null,
"e": 25116,
"s": 25107,
"text": "Qualcomm"
},
{
"code": null,
"e": 25124,
"s": 25116,
"text": "Samsung"
},
{
"code": null,
"e": 25133,
"s": 25124,
"text": "SAP Labs"
},
{
"code": null,
"e": 25143,
"s": 25133,
"text": "Bit Magic"
},
{
"code": null,
"e": 25156,
"s": 25143,
"text": "Mathematical"
},
{
"code": null,
"e": 25164,
"s": 25156,
"text": "Samsung"
},
{
"code": null,
"e": 25172,
"s": 25164,
"text": "FactSet"
},
{
"code": null,
"e": 25181,
"s": 25172,
"text": "SAP Labs"
},
{
"code": null,
"e": 25190,
"s": 25181,
"text": "Qualcomm"
},
{
"code": null,
"e": 25203,
"s": 25190,
"text": "Mathematical"
},
{
"code": null,
"e": 25213,
"s": 25203,
"text": "Bit Magic"
}
] |
Remove all duplicate adjacent characters from a string using Stack
|
01 Sep, 2021
Given a string, str, the task is to remove all the duplicate adjacent characters from the given string.
Examples:
Input: str = "azxxzy" Output: ay. Removal of "xx" modifies the string to "azzy". Now, the removal of "zz" modifies the string to "ay". Since the string "ay" doesn't contain duplicates, the output is ay.
Input: str = "aaccdd" Output: Empty String
Recursive Approach: Refer to the article Recursively remove all adjacent duplicates to solve this problem recursively. Time Complexity: O(N)Auxiliary Space: O(N)
String Functions-based Approach: Refer to this article Remove first adjacent pairs of similar characters until possible to solve this problem using inbuilt functions pop_back() and back() methods of string.
Time Complexity: O(N)Auxiliary Space: O(N)
Stack-based Approach: The problem can be solved using Stack to use the property of LIFO. The idea is to traverse the string from left to right and check if the stack is empty or the top element of the stack is not equal to the current character of str, then push the current character into the stack. Otherwise, pop the element from the top of the stack. Follow the steps below to solve the problem:
Create a stack, st, to remove the adjacent duplicate characters in str. Traverse the string str and check if the stack is empty or the top element of the stack is not equal to the current character. If found to be true, push the current character into st. Otherwise, pop the element from the top of the stack. Finally, print all the remaining elements of the stack.
Create a stack, st to remove the adjacent duplicate characters in str.
Traverse the string str and check if the stack is empty or the top element of the stack not equal to the current character. If found to be true, push the current character into st.
Otherwise, pop the element from the top of the stack.
Finally, print all the remaining elements of the stack.
C++
Java
Python3
C#
Javascript
// C++ program to implement// the above approach#include <bits/stdc++.h>using namespace std; // Function to remove adjacent// duplicate elementsstring ShortenString(string str1){ // Store the string without // duplicate elements stack<char> st; // Store the index of str int i = 0; // Traverse the string str while (i < str1.length()) { // Checks if stack is empty or top of the // stack is not equal to current character if (st.empty() || str1[i] != st.top()) { st.push(str1[i]); i++; } // If top element of the stack is // equal to the current character else { st.pop(); i++; } } // If stack is empty if (st.empty()) { return ("Empty String"); } // If stack is not Empty else { string short_string = ""; while (!st.empty()) { short_string = st.top() + short_string; st.pop(); } return (short_string); }} // Driver Codeint main(){ string str1 ="azzxzy"; cout << ShortenString(str1); return 0;} // This code is contributed by divyeshrabadiya07
// Java program to implement
// the above approach
import java.util.*;

class GFG {

    // Removes adjacent duplicate pairs from str1 until none remain.
    // Returns the reduced string, or "Empty String" if everything
    // cancels.
    //
    // Fixes two issues in the original: the legacy synchronized
    // java.util.Stack is replaced with ArrayDeque, and the answer is
    // assembled with a StringBuilder instead of repeated string
    // prepending, which was O(N^2).
    static String ShortenString(String str1)
    {
        // Stack of surviving characters
        Deque<Character> st = new ArrayDeque<>();

        // Traverse the String str1
        for (int i = 0; i < str1.length(); i++) {
            char c = str1.charAt(i);

            // If the top of the stack equals the current
            // character, the adjacent pair cancels out
            if (!st.isEmpty() && st.peek() == c) {
                st.pop();
            }

            // Otherwise keep the current character
            else {
                st.push(c);
            }
        }

        // Every character was cancelled
        if (st.isEmpty()) {
            return ("Empty String");
        }

        // Characters pop off in reverse order, so append
        // them all and reverse once at the end
        StringBuilder sb = new StringBuilder();
        while (!st.isEmpty()) {
            sb.append(st.pop());
        }
        return sb.reverse().toString();
    }

    // Driver Code
    public static void main(String[] args)
    {
        String str1 = "azzxzy";
        System.out.print(ShortenString(str1));
    }
}
# Python3 program to implement
# the above approach

# Function to remove adjacent duplicate elements.
# Returns the reduced string, or "Empty String" if every
# character cancels out.
def ShortenString(str1):

    # Stack of surviving characters
    st = []

    # Traverse the string str1
    for ch in str1:

        # If the top of the stack equals the current
        # character, the adjacent pair cancels out
        if st and st[-1] == ch:
            st.pop()

        # Otherwise keep the current character
        else:
            st.append(ch)

    # Every character was cancelled
    if not st:
        return "Empty String"

    # ''.join is O(N); the original built the answer with
    # repeated string concatenation in a loop
    return "".join(st)


# Driver Code
if __name__ == "__main__":

    str1 = "azzxzy"
    print(ShortenString(str1))
// C# program to repeatedly remove adjacent duplicate
// characters from a string until none remain.
using System;
using System.Collections.Generic;

class GFG {

    // Returns str1 with every pair of equal adjacent characters
    // repeatedly cancelled out; "Empty String" if nothing survives.
    // (Made public so callers/tests outside the class can use it;
    // widening access is backward compatible.)
    public static String ShortenString(String str1)
    {
        // Stack of the characters kept so far.
        Stack<char> st = new Stack<char>();

        // Single left-to-right pass over the input.
        foreach (char c in str1) {

            // NOTE: the original re-tested st.Count inside this
            // condition and again before Pop(); short-circuit
            // evaluation makes both checks redundant.
            if (st.Count != 0 && st.Peek() == c) {
                // Adjacent duplicate: the pair cancels.
                st.Pop();
            }
            else {
                // Keep this character.
                st.Push(c);
            }
        }

        // Everything cancelled out.
        if (st.Count == 0) {
            return "Empty String";
        }

        // Stack<char>.ToArray copies top-to-bottom, so one reversal
        // yields the string front-to-back in O(n). (The original
        // prepended one char at a time, which is O(n^2).)
        char[] kept = st.ToArray();
        Array.Reverse(kept);
        return new String(kept);
    }

    // Driver Code
    public static void Main(String[] args)
    {
        String str1 = "azzxzy";
        Console.Write(ShortenString(str1));
    }
}
<script> // JavaScript program to implement// the above approach // Function to remove adjacent// duplicate elementsfunction ShortenString(str1){ // Store the string without // duplicate elements var st = []; // Store the index of str var i = 0; // Traverse the string str while (i < str1.length) { // Checks if stack is empty or top of the // stack is not equal to current character if (st.length==0 || str1[i] != st[st.length-1]) { st.push(str1[i]); i++; } // If top element of the stack is // equal to the current character else { st.pop(); i++; } } // If stack is empty if (st.length==0) { return ("Empty String"); } // If stack is not Empty else { var short_string = ""; while(st.length!=0) { short_string = st[st.length-1] + short_string; st.pop(); } return (short_string); }} // Driver Codevar str1 ="azzxzy";document.write( ShortenString(str1)); </script>
axzy
Time Complexity: O(N)Auxiliary Space: O(N)
divyeshrabadiya07
Rajput-Ji
amit143katiyar
noob2000
kapoorsagar226
Recursion
Stack
Strings
Strings
Recursion
Stack
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 54,
"s": 26,
"text": "\n01 Sep, 2021"
},
{
"code": null,
"e": 158,
"s": 54,
"text": "Given a string, str, the task is to remove all the duplicate adjacent characters from the given string."
},
{
"code": null,
"e": 168,
"s": 158,
"text": "Examples:"
},
{
"code": null,
"e": 368,
"s": 168,
"text": "Input: str= βazxxzyβOutput: ay Removal of βxxβ modifies the string to βazzyβ. Now, the removal of βzzβ modifies the string to βayβ. Since the string βayβ doesnβt contain duplicates, the output is ay."
},
{
"code": null,
"e": 404,
"s": 368,
"text": "Input: βaaccddβOutput: Empty String"
},
{
"code": null,
"e": 566,
"s": 404,
"text": "Recursive Approach: Refer to the article Recursively remove all adjacent duplicates to solve this problem recursively. Time Complexity: O(N)Auxiliary Space: O(N)"
},
{
"code": null,
"e": 775,
"s": 566,
"text": "String Functions-based Approach: Refer to this article Remove first adjacent pairs of similar characters until possible to solve this problem using inbuilt functions pop_back() and back() methods of string. "
},
{
"code": null,
"e": 819,
"s": 775,
"text": "Time Complexity: O(N)Auxiliary Space: O(N) "
},
{
"code": null,
"e": 1220,
"s": 819,
"text": "Stack-based Approach: The problem can be solved using Stack to use the property of LIFO. The idea is to traverse the string from left to right and check if the stack is empty or the top element of the stack is not equal to the current character of str, then push the current character into the stack. Otherwise, pop the element from the top of the stack. Follow the steps below to solve the problem: "
},
{
"code": null,
"e": 1579,
"s": 1220,
"text": "Create a stack, st to remove the adjacent duplicate characters in str.Traverse the string str and check if the stack is empty or the top element of the stack not equal to the current character. If found to be true, push the current character into st.Otherwise, pop the element from the top of the stack.Finally, print all the remaining elements of the stack."
},
{
"code": null,
"e": 1650,
"s": 1579,
"text": "Create a stack, st to remove the adjacent duplicate characters in str."
},
{
"code": null,
"e": 1831,
"s": 1650,
"text": "Traverse the string str and check if the stack is empty or the top element of the stack not equal to the current character. If found to be true, push the current character into st."
},
{
"code": null,
"e": 1885,
"s": 1831,
"text": "Otherwise, pop the element from the top of the stack."
},
{
"code": null,
"e": 1941,
"s": 1885,
"text": "Finally, print all the remaining elements of the stack."
},
{
"code": null,
"e": 1945,
"s": 1941,
"text": "C++"
},
{
"code": null,
"e": 1950,
"s": 1945,
"text": "Java"
},
{
"code": null,
"e": 1958,
"s": 1950,
"text": "Python3"
},
{
"code": null,
"e": 1961,
"s": 1958,
"text": "C#"
},
{
"code": null,
"e": 1972,
"s": 1961,
"text": "Javascript"
},
{
"code": "// C++ program to implement// the above approach#include <bits/stdc++.h>using namespace std; // Function to remove adjacent// duplicate elementsstring ShortenString(string str1){ // Store the string without // duplicate elements stack<char> st; // Store the index of str int i = 0; // Traverse the string str while (i < str1.length()) { // Checks if stack is empty or top of the // stack is not equal to current character if (st.empty() || str1[i] != st.top()) { st.push(str1[i]); i++; } // If top element of the stack is // equal to the current character else { st.pop(); i++; } } // If stack is empty if (st.empty()) { return (\"Empty String\"); } // If stack is not Empty else { string short_string = \"\"; while (!st.empty()) { short_string = st.top() + short_string; st.pop(); } return (short_string); }} // Driver Codeint main(){ string str1 =\"azzxzy\"; cout << ShortenString(str1); return 0;} // This code is contributed by divyeshrabadiya07",
"e": 3237,
"s": 1972,
"text": null
},
{
"code": "// Java program to implement// the above approachimport java.util.*;class GFG{ // Function to remove adjacent// duplicate elementsstatic String ShortenString(String str1){ // Store the String without // duplicate elements Stack<Character> st = new Stack<Character>(); // Store the index of str int i = 0; // Traverse the String str while (i < str1.length()) { // Checks if stack is empty // or top of the stack is not // equal to current character if (st.isEmpty() || str1.charAt(i) != st.peek()) { st.add(str1.charAt(i)); i++; } // If top element of the stack is // equal to the current character else { st.pop(); i++; } } // If stack is empty if (st.isEmpty()) { return (\"Empty String\"); } // If stack is not Empty else { String short_String = \"\"; while (!st.isEmpty()) { short_String = st.peek() + short_String; st.pop(); } return (short_String); }} // Driver Codepublic static void main(String[] args){ String str1 =\"azzxzy\"; System.out.print(ShortenString(str1)); }} // This code is contributed by Rajput-Ji",
"e": 4389,
"s": 3237,
"text": null
},
{
"code": "# Python3 program to implement# the above approach # Function to remove adjacent# duplicate elementsdef ShortenString(str1): # Store the string without # duplicate elements st = [] # Store the index of str i = 0 # Traverse the string str while i < len(str1): # Checks if stack is empty or top of the # stack is not equal to current character if len(st)== 0 or str1[i] != st[-1]: st.append(str1[i]) i += 1 # If top element of the stack is # equal to the current character else: st.pop() i += 1 # If stack is empty if len(st)== 0: return(\"Empty String\") # If stack is not Empty else: short_string = \"\" for i in st: short_string += str(i) return(short_string) # Driver Codeif __name__ == \"__main__\": str1 =\"azzxzy\" print(ShortenString(str1))",
"e": 5357,
"s": 4389,
"text": null
},
{
"code": "// C# program to implement// the above approachusing System;using System.Collections.Generic; class GFG{ // Function to remove adjacent// duplicate elementsstatic String ShortenString(String str1){ // Store the String without // duplicate elements Stack<char> st = new Stack<char>(); // Store the index of str int i = 0; // Traverse the String str while (i < str1.Length) { // Checks if stack is empty // or top of the stack is not // equal to current character if (st.Count == 0 || (st.Count != 0 && str1[i] != st.Peek())) { st.Push(str1[i]); i++; } // If top element of the stack is // equal to the current character else { if (st.Count != 0) st.Pop(); i++; } } // If stack is empty if (st.Count == 0) { return (\"Empty String\"); } // If stack is not Empty else { String short_String = \"\"; while (st.Count != 0) { short_String = st.Peek() + short_String; st.Pop(); } return (short_String); }} // Driver Codepublic static void Main(String[] args){ String str1 =\"azzxzy\"; Console.Write(ShortenString(str1));}} // This code is contributed by Amit Katiyar",
"e": 6775,
"s": 5357,
"text": null
},
{
"code": "<script> // JavaScript program to implement// the above approach // Function to remove adjacent// duplicate elementsfunction ShortenString(str1){ // Store the string without // duplicate elements var st = []; // Store the index of str var i = 0; // Traverse the string str while (i < str1.length) { // Checks if stack is empty or top of the // stack is not equal to current character if (st.length==0 || str1[i] != st[st.length-1]) { st.push(str1[i]); i++; } // If top element of the stack is // equal to the current character else { st.pop(); i++; } } // If stack is empty if (st.length==0) { return (\"Empty String\"); } // If stack is not Empty else { var short_string = \"\"; while(st.length!=0) { short_string = st[st.length-1] + short_string; st.pop(); } return (short_string); }} // Driver Codevar str1 =\"azzxzy\";document.write( ShortenString(str1)); </script>",
"e": 7947,
"s": 6775,
"text": null
},
{
"code": null,
"e": 7952,
"s": 7947,
"text": "axzy"
},
{
"code": null,
"e": 7995,
"s": 7952,
"text": "Time Complexity: O(N)Auxiliary Space: O(N)"
},
{
"code": null,
"e": 8013,
"s": 7995,
"text": "divyeshrabadiya07"
},
{
"code": null,
"e": 8023,
"s": 8013,
"text": "Rajput-Ji"
},
{
"code": null,
"e": 8038,
"s": 8023,
"text": "amit143katiyar"
},
{
"code": null,
"e": 8047,
"s": 8038,
"text": "noob2000"
},
{
"code": null,
"e": 8062,
"s": 8047,
"text": "kapoorsagar226"
},
{
"code": null,
"e": 8072,
"s": 8062,
"text": "Recursion"
},
{
"code": null,
"e": 8078,
"s": 8072,
"text": "Stack"
},
{
"code": null,
"e": 8086,
"s": 8078,
"text": "Strings"
},
{
"code": null,
"e": 8094,
"s": 8086,
"text": "Strings"
},
{
"code": null,
"e": 8104,
"s": 8094,
"text": "Recursion"
},
{
"code": null,
"e": 8110,
"s": 8104,
"text": "Stack"
}
] |
Django Channels β Introduction and Basic Setup
|
13 Apr, 2021
Django is a powerful Python framework for web development. It is fast, secure, and reliable. Channels allow Django projects to handle HTTP along with asynchronous protocols like WebSockets, MQTT, chatbots, and more.
Channels preserve the synchronous behavior of Django and add a layer of asynchronous protocols, allowing users to write views that are entirely synchronous, asynchronous, or a mixture of both. Channels basically allow the application to support "long-running connections". It replaces Django's default WSGI with its ASGI.
ASGI (Asynchronous Server Gateway Interface) provides an interface between async Python web servers and applications while it supports all the features provided by WSGI.
A consumer is a basic unit of Channels. It is an event-driven class that supports both async and sync applications. Consumers can run longer and hence they support web sockets that need persistent connection.
In this post, we will set up a basic example of channels. We will build a calculator app that will allow the user to send multiple expressions to the server and receive the result through a single persistent connection.
It is always a good idea to create a virtual environment for the python apps in order to avoid version conflicts. Run the following commands in the terminal to get started
easy-install pip
python3 -m pip install virtualenv
virtualenv venv
source venv/bin/activate
Now install Django and Channels:
pip install django
pip install channels
# On windows, try an unofficial wheel of 'Twisted' in case of dependency errors
Now start a Django project and create an app named "liveCalculator":
django-admin startproject sampleProject
cd sampleProject
python3 manage.py startapp liveCalculator
In sampleProject/settings.py, register channels and liveCalculator.
settings.py:
INSTALLED_APPS = [
'channels',
'liveCalculator',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
In sampleProject/asgi.py, add the http protocol.
asgi.py:
Python3
# ASGI entry point for sampleProject.
# ProtocolTypeRouter dispatches each incoming connection by its
# protocol type; only plain HTTP is wired up at this stage.
import os

import django
from channels.http import AsgiHandler
from channels.routing import ProtocolTypeRouter

# The settings module must be configured before django.setup()
# initialises the app registry.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sampleProject.settings')
django.setup()

application = ProtocolTypeRouter({
    "http": AsgiHandler(),  # Just HTTP for now. (We can add other protocols later.)
})
Now we need to register this asgi into our application. Add this line in sampleProject/settings.py :
ASGI_APPLICATION = "sampleProject.asgi.application"
Create a new folder liveCalculator/templates/liveCalculator and create a new file index.html inside it. It will be the starting page of our app. Add the following code in index.html:
index.html:
HTML
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Live Calculator</title>
</head>

<body>
    <!-- Conversation log: every sent expression and every server
         result gets appended here. -->
    <textarea name="ta" id="results" cols="30" rows="10"> </textarea><br>
    Enter the expression: <input type="text" id="exp">
    <input type="button" id="submit" value="Get Results">
    <script>
        // One persistent WebSocket; all expressions reuse this connection.
        const socket = new WebSocket('ws://localhost:8000/ws/livec/');

        // Server replies arrive as JSON frames of the form {"result": ...}.
        socket.onmessage = (e) => {
            result = JSON.parse(e.data).result;
            document.getElementById("results").value += "Server: " + result + "\n";
        }

        socket.onclose = (e) => {
            console.log("Socket closed!");
        }

        // Pressing Enter in the input field triggers the submit button.
        document.querySelector('#exp').onkeyup = function (e) {
            if (e.keyCode === 13) {
                // enter, return
                document.querySelector('#submit ').click();
            }
        };

        // Send the expression to the server as JSON, echo it locally,
        // then clear the input for the next expression.
        document.querySelector("#submit").onclick = (e) => {
            inputfield = document.querySelector("#exp")
            exp = inputfield.value
            socket.send(JSON.stringify(
                {
                    expression: exp
                }
            ))
            document.querySelector("#results").value += "You: " + exp + "\n";
            inputfield.value = "";
        }
    </script>
</body>

</html>
The above code will render a text area and an input box where the user can enter the expression. It will create a socket connection that we will make later and append the received result in the text area. When the user inputs the expression, it will send the expression through a socket connection.
Now create a view to render this page in liveCalculator/views.py :
liveCalculator/views.py:
Python3
from django.shortcuts import render # Create your views here. def index(request): return render(request, 'liveCalculator/index.html', {})
Next, we need to create a route for this view. Add a new file urls.py in liveCalculator directory and add the following code:
liveCalculator/urls.py:
Python3
from django.conf.urls import url

from . import views

# Route the app root ('^$') to the index view.
# NOTE(review): django.conf.urls.url is deprecated since Django 3.1;
# consider django.urls.re_path when upgrading — confirm target version.
urlpatterns = [
    url(r'^$', views.index, name="index"),
]
Register this route in sampleProject/urls.py :
sampleProject/urls.py:
Python3
from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url

# Project-level URL configuration: the admin site plus every route
# defined by the liveCalculator app, mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^', include('liveCalculator.urls'))
]
Now we need to create the consumer for our web socket connection. We will use the generic WebsocketConsumer class to implement its event-driven methods. Create a new file consumers.py in liveCalculator folder and add the following code:
consumers.py:
Python3
import json
from channels.generic.websocket import WebsocketConsumer


class Calculator(WebsocketConsumer):
    """Synchronous WebSocket consumer that evaluates expressions.

    Receives JSON text frames of the form {"expression": "<text>"} and
    replies with {"result": <value>} over the same persistent connection.
    """

    def connect(self):
        # Accept every incoming handshake unconditionally.
        self.accept()

    def disconnect(self, close_code):
        # Close the socket when the client disconnects.
        self.close()

    def receive(self, text_data):
        # Parse the incoming JSON frame and extract the expression string.
        text_data_json = json.loads(text_data)
        expression = text_data_json['expression']
        try:
            # SECURITY: eval() executes arbitrary Python sent by the
            # client — acceptable only in a trusted local demo. For any
            # untrusted input, replace with ast.literal_eval or a real
            # expression parser.
            result = eval(expression)
        except Exception as e:
            # Any parse or runtime error is reported back as plain text.
            result = "Invalid Expression"
        self.send(text_data=json.dumps({
            'result': result
        }))
The WebsocketConsumer class supports these user-defined methods:
connect(): We can write the business logic of what should happen when the client sends a connection request.
disconnect(): We can write the business logic of what should happen when the client sends a disconnection request.
receive(): We can write the business logic of what should happen when the client sends a message.
It also supports these built-in methods:
accept(): It will accept the incoming connection.
close(): It will close the current connection.
send(): It will send the specified message to the client.
We have simply used the above methods in our Calculator class to accept the connection, evaluate the expression when a message is received, and send the result to the client.
Next, we also need to define the routing method for this consumer. Create a new file routing.py in the same folder and add the following code to it:
routing.py:
Python3
from django.urls import re_path

from . import consumers

# Maps ws://<host>/ws/livec/ to the Calculator consumer.
# as_asgi() wraps the consumer class into an ASGI application instance.
websocket_urlpatterns = [
    re_path(r'ws/livec/$', consumers.Calculator.as_asgi()),
]
Note that we have used as_asgi() method on our Calculator class to use it for our application. This will enable the socket on ws://<IP:Port>/ws/livec. Now register routing.py into asgi.py by declaring the WebSocket protocol.
asgi.py:
Python3
# Final ASGI entry point: HTTP handled by Django's standard ASGI app,
# WebSocket connections routed (with Django auth available on the
# scope) through the app's websocket URL patterns.
import os

from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
import liveCalculator.routing

# The settings module must be set before building the Django ASGI app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sampleProject.settings")

application = ProtocolTypeRouter({
    "http": get_asgi_application(),
    "websocket": AuthMiddlewareStack(
        URLRouter(
            liveCalculator.routing.websocket_urlpatterns
        )
    ),
})
We are almost done with our first Channels application. Save all the files and run the following commands in the terminal:
python3 manage.py makemigrations
python3 manage.py migrate
python3 manage.py runserver
Now open http://localhost:8000 on your browser, and you should see the output like this:
See the log of the server. Note that we have created the connection only once, and we can send the message multiple times without creating a new connection.
Socket-programming
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 54,
"s": 26,
"text": "\n13 Apr, 2021"
},
{
"code": null,
"e": 271,
"s": 54,
"text": "Django is a powerful Python framework for web development. It is fast, secure, and reliable. Channels allow Django projects to handle HTTP along with asynchronous protocols like WebSockets, MQTT, chatbots, and more. "
},
{
"code": null,
"e": 596,
"s": 271,
"text": "Channels preserve the synchronous behavior of Django and add a layer of asynchronous protocols allowing users to write the views that are entirely synchronous, asynchronous, or a mixture of both. Channels basically allow the application to support βlong-running connectionsβ. It replaces Djangoβs default WSGI with its ASGI."
},
{
"code": null,
"e": 766,
"s": 596,
"text": "ASGI (Asynchronous Server Gateway Interface) provides an interface between async Python web servers and applications while it supports all the features provided by WSGI."
},
{
"code": null,
"e": 975,
"s": 766,
"text": "A consumer is a basic unit of Channels. It is an event-driven class that supports both async and sync applications. Consumers can run longer and hence they support web sockets that need persistent connection."
},
{
"code": null,
"e": 1195,
"s": 975,
"text": "In this post, we will set up a basic example of channels. We will build a calculator app that will allow the user to send multiple expressions to the server and receive the result through a single persistent connection."
},
{
"code": null,
"e": 1367,
"s": 1195,
"text": "It is always a good idea to create a virtual environment for the python apps in order to avoid version conflicts. Run the following commands in the terminal to get started"
},
{
"code": null,
"e": 1459,
"s": 1367,
"text": "easy-install pip\npython3 -m pip install virtualenv\nvirtualenv venv\nsource venv/bin/activate"
},
{
"code": null,
"e": 1492,
"s": 1459,
"text": "Now install Django and Channels:"
},
{
"code": null,
"e": 1612,
"s": 1492,
"text": "pip install django\npip install channels\n# On windows, try an unofficial wheel of 'Twisted' in case of dependency errors"
},
{
"code": null,
"e": 1680,
"s": 1612,
"text": "Now start a Django project and create an app named βliveCalculatorβ"
},
{
"code": null,
"e": 1779,
"s": 1680,
"text": "django-admin startproject sampleProject\ncd sampleProject\npython3 manage.py startapp liveCalculator"
},
{
"code": null,
"e": 1847,
"s": 1779,
"text": "In sampleProject/settings.py, register channels and liveCalculator."
},
{
"code": null,
"e": 1860,
"s": 1847,
"text": "settings.py:"
},
{
"code": null,
"e": 2097,
"s": 1860,
"text": "INSTALLED_APPS = [\n 'channels',\n 'liveCalculator',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]"
},
{
"code": null,
"e": 2146,
"s": 2097,
"text": "In sampleProject/asgi.py, add the http protocol."
},
{
"code": null,
"e": 2155,
"s": 2146,
"text": "asgi.py:"
},
{
"code": null,
"e": 2163,
"s": 2155,
"text": "Python3"
},
{
"code": "import os import djangofrom channels.http import AsgiHandlerfrom channels.routing import ProtocolTypeRouter os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sampleProject.settings')django.setup() application = ProtocolTypeRouter({ \"http\": AsgiHandler(), # Just HTTP for now. (We can add other protocols later.)})",
"e": 2481,
"s": 2163,
"text": null
},
{
"code": null,
"e": 2582,
"s": 2481,
"text": "Now we need to register this asgi into our application. Add this line in sampleProject/settings.py :"
},
{
"code": null,
"e": 2634,
"s": 2582,
"text": "ASGI_APPLICATION = \"sampleProject.asgi.application\""
},
{
"code": null,
"e": 2817,
"s": 2634,
"text": "Create a new folder liveCalculator/templates/liveCalculator and create a new file index.html inside it. It will be the starting page of our app. Add the following code in index.html:"
},
{
"code": null,
"e": 2829,
"s": 2817,
"text": "index.html:"
},
{
"code": null,
"e": 2834,
"s": 2829,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <title>Live Calculator</title></head> <body> <textarea name=\"ta\" id=\"results\" cols=\"30\" rows=\"10\"> </textarea><br> Enter the expression: <input type=\"text\" id=\"exp\"> <input type=\"button\" id=\"submit\" value=\"Get Results\"> <script> const socket = new WebSocket('ws://localhost:8000/ws/livec/'); socket.onmessage = (e) => { result = JSON.parse(e.data).result; document.getElementById(\"results\").value += \"Server: \" + result + \"\\n\"; } socket.onclose = (e) => { console.log(\"Socket closed!\"); } document.querySelector('#exp').onkeyup = function (e) { if (e.keyCode === 13) { // enter, return document.querySelector('#submit ').click(); } }; document.querySelector(\"#submit\").onclick = (e) => { inputfield = document.querySelector(\"#exp\") exp = inputfield.value socket.send(JSON.stringify( { expression: exp } )) document.querySelector(\"#results\").value += \"You: \" + exp + \"\\n\"; inputfield.value = \"\"; } </script></body> </html>",
"e": 4178,
"s": 2834,
"text": null
},
{
"code": null,
"e": 4477,
"s": 4178,
"text": "The above code will render a text area and an input box where the user can enter the expression. It will create a socket connection that we will make later and append the received result in the text area. When the user inputs the expression, it will send the expression through a socket connection."
},
{
"code": null,
"e": 4544,
"s": 4477,
"text": "Now create a view to render this page in liveCalculator/views.py :"
},
{
"code": null,
"e": 4569,
"s": 4544,
"text": "liveCalculator/views.py:"
},
{
"code": null,
"e": 4577,
"s": 4569,
"text": "Python3"
},
{
"code": "from django.shortcuts import render # Create your views here. def index(request): return render(request, 'liveCalculator/index.html', {})",
"e": 4720,
"s": 4577,
"text": null
},
{
"code": null,
"e": 4846,
"s": 4720,
"text": "Next, we need to create a route for this view. Add a new file urls.py in liveCalculator directory and add the following code:"
},
{
"code": null,
"e": 4870,
"s": 4846,
"text": "liveCalculator/urls.py:"
},
{
"code": null,
"e": 4878,
"s": 4870,
"text": "Python3"
},
{
"code": "from django.conf.urls import urlfrom . import views urlpatterns = [ url(r'^$', views.index, name=\"index\"),]",
"e": 4990,
"s": 4878,
"text": null
},
{
"code": null,
"e": 5037,
"s": 4990,
"text": "Register this route in sampleProject/urls.py :"
},
{
"code": null,
"e": 5060,
"s": 5037,
"text": "sampleProject/urls.py:"
},
{
"code": null,
"e": 5068,
"s": 5060,
"text": "Python3"
},
{
"code": "from django.contrib import adminfrom django.urls import pathfrom django.conf.urls import include, urlurlpatterns = [ path('admin/', admin.site.urls), url(r'^', include('liveCalculator.urls'))]",
"e": 5267,
"s": 5068,
"text": null
},
{
"code": null,
"e": 5504,
"s": 5267,
"text": "Now we need to create the consumer for our web socket connection. We will use the generic WebsocketConsumer class to implement its event-driven methods. Create a new file consumers.py in liveCalculator folder and add the following code:"
},
{
"code": null,
"e": 5518,
"s": 5504,
"text": "consumers.py:"
},
{
"code": null,
"e": 5526,
"s": 5518,
"text": "Python3"
},
{
"code": "import jsonfrom channels.generic.websocket import WebsocketConsumer class Calculator(WebsocketConsumer): def connect(self): self.accept() def disconnect(self, close_code): self.close() def receive(self, text_data): text_data_json = json.loads(text_data) expression = text_data_json['expression'] try: result = eval(expression) except Exception as e: result = \"Invalid Expression\" self.send(text_data=json.dumps({ 'result': result }))",
"e": 6066,
"s": 5526,
"text": null
},
{
"code": null,
"e": 6131,
"s": 6066,
"text": "The WebsocketConsumer class supports these user-defined methods:"
},
{
"code": null,
"e": 6240,
"s": 6131,
"text": "connect(): We can write the business logic of what should happen when the client sends a connection request."
},
{
"code": null,
"e": 6355,
"s": 6240,
"text": "disconnect(): We can write the business logic of what should happen when the client sends a disconnection request."
},
{
"code": null,
"e": 6453,
"s": 6355,
"text": "receive(): We can write the business logic of what should happen when the client sends a message."
},
{
"code": null,
"e": 6494,
"s": 6453,
"text": "It also supports these built-in methods:"
},
{
"code": null,
"e": 6544,
"s": 6494,
"text": "accept(): It will accept the incoming connection."
},
{
"code": null,
"e": 6591,
"s": 6544,
"text": "close(): It will close the current connection."
},
{
"code": null,
"e": 6649,
"s": 6591,
"text": "send(): It will send the specified message to the client."
},
{
"code": null,
"e": 6815,
"s": 6649,
"text": "We have simply used the above methods in our Calculator class to accept the connection, evaluate the expression when a message a received, and send it to the client."
},
{
"code": null,
"e": 6964,
"s": 6815,
"text": "Next, we also need to define the routing method for this consumer. Create a new file routing.py in the same folder and add the following code to it:"
},
{
"code": null,
"e": 6976,
"s": 6964,
"text": "routing.py:"
},
{
"code": null,
"e": 6984,
"s": 6976,
"text": "Python3"
},
{
"code": "from django.urls import re_path from . import consumers websocket_urlpatterns = [ re_path(r'ws/livec/$', consumers.Calculator.as_asgi()),]",
"e": 7128,
"s": 6984,
"text": null
},
{
"code": null,
"e": 7354,
"s": 7128,
"text": "Note that we have used as_asgi() method on our Calculator class to use it for our application. This will enable the socket on ws://<IP:Port>/ws/livec. Now register routing.py into asgi.py by declaring the WebSocket protocol. "
},
{
"code": null,
"e": 7363,
"s": 7354,
"text": "asgi.py:"
},
{
"code": null,
"e": 7371,
"s": 7363,
"text": "Python3"
},
{
"code": "import os from channels.auth import AuthMiddlewareStackfrom channels.routing import ProtocolTypeRouter, URLRouterfrom django.core.asgi import get_asgi_applicationimport liveCalculator.routing os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"sampleProject.settings\") application = ProtocolTypeRouter({ \"http\": get_asgi_application(), \"websocket\": AuthMiddlewareStack( URLRouter( liveCalculator.routing.websocket_urlpatterns ) ),})",
"e": 7834,
"s": 7371,
"text": null
},
{
"code": null,
"e": 7957,
"s": 7834,
"text": "We are almost done with our first Channels application. Save all the files and run the following commands in the terminal:"
},
{
"code": null,
"e": 8044,
"s": 7957,
"text": "python3 manage.py makemigrations\npython3 manage.py migrate\npython3 manage.py runserver"
},
{
"code": null,
"e": 8133,
"s": 8044,
"text": "Now open http://localhost:8000 on your browser, and you should see the output like this:"
},
{
"code": null,
"e": 8291,
"s": 8133,
"text": "See the log of the server. Note that we have created the connection only once, and we can send the message multiple times without creating a new connection. "
},
{
"code": null,
"e": 8310,
"s": 8291,
"text": "Socket-programming"
},
{
"code": null,
"e": 8317,
"s": 8310,
"text": "Python"
}
] |
Python program to Concatenate all Elements of a List into a String
|
15 Jul, 2022
Given a list, the task is to write a Python program to concatenate all elements in a list into a string.
Examples:
Input: ['hello', 'geek', 'have', 'a', 'geeky', 'day']
Output: hello geek have a geeky day
Here, we are taking a list of words, and by using a Python loop we are iterating over each element and concatenating the words with the help of the '+' operator.
Python3
# Concatenate every word in the list into one space-separated string
# using the + operator.
l = ['hello', 'geek', 'have', 'a', 'geeky', 'day']

ans = ''
for i in l:
    # concatenating the strings
    # using + operator.
    # Add the separator only between words: the original initialised
    # ans = ' ' and always prefixed ' ', which produced two stray
    # leading spaces that contradict the article's shown output.
    if ans:
        ans = ans + ' ' + i
    else:
        ans = i

print(ans)
Output:
hello geek have a geeky day
Here, we are using list comprehension to make the items into a Python string.
Python3
# Join the list items into a single space-separated string using a
# list comprehension.
l = ['hello', 'geek', 'have', 'a', 'geeky', 'day']

# Stringify every element first, then glue the pieces with spaces.
parts = [str(item) for item in l]
res = " ".join(parts)
res
Output:
'hello geek have a geeky day'
The join() is an inbuilt string function in Python used to join elements of the sequence separated by a string separator. This function joins elements of a sequence and makes it a string.
Python3
# code
# Join all list elements into one string with a single space
# between consecutive elements, via str.join.
l = ['hello', 'geek', 'have', 'a', '1', 'day']

separator = ' '
# str.join concatenates every element of the sequence, inserting
# the separator between them; l is rebound to the resulting string.
l = separator.join(l)
print(l)
Output:
hello geek have a 1 day
The map() function returns a map object(which is an iterator) of the results after applying the given function to each item of a given iterable. In map we passed the str as Datatype and list, this will iterate till the length of the list and join each element to form a string.
Python3
# Build one space-separated string by mapping str over the list
# and joining the results.
l = ['hello', 'geek', 'have', 'a', 'geeky', 'day']

# map(str, ...) stringifies each element lazily; join consumes the
# iterator and glues the pieces with single spaces.
stringified = map(str, l)
res = " ".join(stringified)
print(res)
Output:
hello geek have a geeky day
The reduce(fun,seq) function is used to apply a particular function passed in its argument to all of the list elements mentioned in the sequence passed along. This function is defined in βfunctoolsβ module.
Python3
from functools import reduce l = ['hello', 'geek', 'have', 'a', 'geeky', 'day'] # Concatenate all items in list to a stringres = reduce(lambda x, y: x + ' ' + y, map(str, l))print(res)
Output:
hello geek have a geeky day
surajkumarguptaintern
Python string-programs
Technical Scripter 2020
Python
Python Programs
Technical Scripter
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n15 Jul, 2022"
},
{
"code": null,
"e": 133,
"s": 28,
"text": "Given a list, the task is to write a Python program to concatenate all elements in a list into a string."
},
{
"code": null,
"e": 143,
"s": 133,
"text": "Examples:"
},
{
"code": null,
"e": 234,
"s": 143,
"text": "Input: ['hello', 'geek', 'have', 'a', 'geeky', 'day']\nOutput: hello geek have a geeky day"
},
{
"code": null,
"e": 389,
"s": 234,
"text": "Here, we are taking a list of words, and by using the Python loop we are iterating each element and concatenating words with the help of the β+β operator."
},
{
"code": null,
"e": 397,
"s": 389,
"text": "Python3"
},
{
"code": "l = [ 'hello', 'geek', 'have', 'a', 'geeky', 'day'] ans = ' 'for i in l: # concatenating the strings # using + operator ans = ans+ ' '+ i print(ans)",
"e": 558,
"s": 397,
"text": null
},
{
"code": null,
"e": 566,
"s": 558,
"text": "Output:"
},
{
"code": null,
"e": 594,
"s": 566,
"text": "hello geek have a geeky day"
},
{
"code": null,
"e": 673,
"s": 594,
"text": "Here, we are using list comprehension to make the items into a Python string. "
},
{
"code": null,
"e": 681,
"s": 673,
"text": "Python3"
},
{
"code": "l = [ 'hello', 'geek', 'have', 'a', 'geeky', 'day'] res = \" \".join([str(item) for item in l])res",
"e": 782,
"s": 681,
"text": null
},
{
"code": null,
"e": 790,
"s": 782,
"text": "Output:"
},
{
"code": null,
"e": 820,
"s": 790,
"text": "'hello geek have a geeky day'"
},
{
"code": null,
"e": 1009,
"s": 820,
"text": "The join() is an inbuilt string function in Python used to join elements of the sequence separated by a string separator. This function joins elements of a sequence and makes it a string. "
},
{
"code": null,
"e": 1017,
"s": 1009,
"text": "Python3"
},
{
"code": "# codel = ['hello', 'geek', 'have', 'a', '1', 'day'] # this will join all the# elements of the list with ' 'l = ' '.join(l)print(l)",
"e": 1151,
"s": 1017,
"text": null
},
{
"code": null,
"e": 1159,
"s": 1151,
"text": "Output:"
},
{
"code": null,
"e": 1183,
"s": 1159,
"text": "hello geek have a 1 day"
},
{
"code": null,
"e": 1461,
"s": 1183,
"text": "The map() function returns a map object(which is an iterator) of the results after applying the given function to each item of a given iterable. In map we passed the str as Datatype and list, this will iterate till the length of the list and join each element to form a string."
},
{
"code": null,
"e": 1469,
"s": 1461,
"text": "Python3"
},
{
"code": "l = [ 'hello', 'geek', 'have', 'a', 'geeky', 'day'] res = \" \".join(map(str, l))print(res)",
"e": 1563,
"s": 1469,
"text": null
},
{
"code": null,
"e": 1571,
"s": 1563,
"text": "Output:"
},
{
"code": null,
"e": 1599,
"s": 1571,
"text": "hello geek have a geeky day"
},
{
"code": null,
"e": 1806,
"s": 1599,
"text": "The reduce(fun,seq) function is used to apply a particular function passed in its argument to all of the list elements mentioned in the sequence passed along. This function is defined in βfunctoolsβ module."
},
{
"code": null,
"e": 1814,
"s": 1806,
"text": "Python3"
},
{
"code": "from functools import reduce l = ['hello', 'geek', 'have', 'a', 'geeky', 'day'] # Concatenate all items in list to a stringres = reduce(lambda x, y: x + ' ' + y, map(str, l))print(res)",
"e": 2003,
"s": 1814,
"text": null
},
{
"code": null,
"e": 2011,
"s": 2003,
"text": "Output:"
},
{
"code": null,
"e": 2039,
"s": 2011,
"text": "hello geek have a geeky day"
},
{
"code": null,
"e": 2061,
"s": 2039,
"text": "surajkumarguptaintern"
},
{
"code": null,
"e": 2084,
"s": 2061,
"text": "Python string-programs"
},
{
"code": null,
"e": 2108,
"s": 2084,
"text": "Technical Scripter 2020"
},
{
"code": null,
"e": 2115,
"s": 2108,
"text": "Python"
},
{
"code": null,
"e": 2131,
"s": 2115,
"text": "Python Programs"
},
{
"code": null,
"e": 2150,
"s": 2131,
"text": "Technical Scripter"
}
] |
Find just strictly greater element from first array for each element in second array - GeeksforGeeks
|
15 Feb, 2021
Given two arrays A[] and B[] containing N elements, the task is to find, for every element in the array B[], the element which is just strictly greater than that element which is present in the array A[]. If no value is present, then print 'null'.
Note: The value from the array A[] can only be used once.
Examples:
Input: A[] = {0, 1, 2, 3, 4}, B[] = {0, 1, 1, 2, 3} Output: 1 2 3 4 null Explanation: On iterating every element in the array B[]: The value which is strictly greater than 0 and present in the array A[] is 1. Similarly, the value which is strictly greater than 1 and present in the array A[] is 2. Similarly, the value which is strictly greater than 1 and present in the array A[] is 3 because 2 has already been used for the previous 1. Similarly, the value which is strictly greater than 2 and present in the array A[] is 4. Now, there is no value in the array which is greater than 3 because 4 has already been used for the previous 2. So, null is printed.
Input: A[] = {0, 1, 6, 4, 0, 2, 4, 2, 4, 7}, B[] = {0, 1, 6, 4, 0, 2, 4, 2, 4, 7} Output: 1 2 7 6 2 4 null 4 null null
Approach: The idea is to use the Tree set Data structure. But since a tree set doesnβt support duplicate values, a hashmap is used to store the frequency of the elements.
Iterate through the array A[].
Add the elements in the array A[] into the tree set.
Update their frequencies in the hashmap.
Now, for every element in the array B[], find the value which is strictly greater than the current value by using the higher() function of the tree set.
Now, reduce the frequency of this number in the hash map by 1.
Keep repeating the above two steps until the frequency of the number becomes 0. If it is 0, then all the occurrences of that number have been used up for the elements. So, remove that element from the tree set.
Below is the implementation of the above approach:
C++
Java
Python3
// C++ program to find the values// strictly greater than the element// and present in the array#include<bits/stdc++.h>using namespace std; // Function to find the values// strictly greater than the element// and present in the arrayvoid operations(int n, long long A[], long long B[]){ // Treeset to store the // values of the array A set<long long>tree; // HashMap to store the frequencies // of the values in array A map<long long, int>freqMap; // Iterating through the array // and add values in the treeset for(int j = 0; j < n; j++) { long long x = A[j]; tree.insert(x); freqMap[x]++; } // Finding the strictly greater value // in the array A[] using "higher()" // function and also reducing the // frequency of that value because it // has to be used only once for(int j = 0; j < n; j++) { long long x = B[j]; // If the higher value exists if (tree.upper_bound(x) != tree.end()) { cout << *tree.upper_bound(x) << " "; // If the frequency value is 1 // then remove it from treeset // because it has been used // and its frequency becomes 0 if (freqMap[*tree.upper_bound(x)] == 1) { tree.erase(*tree.upper_bound(x)); } // Else, reducing the frequency // by 1 else { freqMap[*tree.upper_bound(x)]--; } } // If the value is not present // then print null else { cout << "null "; } }} // Driver codeint main(){ int n = 12; long long A[] = { 9, 5, 100, 4, 89, 2, 0, 2, 89, 77, 77, 77 }; long long B[] = { 0, 18, 60, 34, 50, 29, 4, 20, 48, 77, 2, 8 }; operations(n, A, B);} // This code is contributed by Stream_Cipher
// Java program to find the values// strictly greater than the element// and present in the array import java.io.*;import java.util.*;public class GFG { // Function to find the values // strictly greater than the element // and present in the array public static void operations( int n, long A[], long B[]) { // Treeset to store the // values of the array A TreeSet<Long> tree = new TreeSet<Long>(); // HashMap to store the frequencies // of the values in array A HashMap<Long, Integer> freqMap = new HashMap<Long, Integer>(); // Iterating through the array // and add values in the treeset for (int j = 0; j < n; j++) { long x = A[j]; tree.add(x); // Updating the frequencies if (freqMap.containsKey(x)) { freqMap.put(x, freqMap.get(x) + 1); } else { freqMap.put(x, 1); } } // Finding the strictly greater value // in the array A[] using "higher()" // function and also reducing the // frequency of that value because it // has to be used only once for (int j = 0; j < n; j++) { long x = B[j]; // If the higher value exists if (tree.higher(x) != null) { System.out.print(tree.higher(x) + " "); // If the frequency value is 1 // then remove it from treeset // because it has been used // and its frequency becomes 0 if (freqMap.get(tree.higher(x)) == 1) { tree.remove(tree.higher(x)); } // Else, reducing the frequency // by 1 else { freqMap.put( tree.higher(x), freqMap.get(tree.higher(x)) - 1); } } // If the value is not present // then print null else { System.out.print("null "); } } } // Driver code public static void main(String args[]) { int n = 12; long A[] = new long[] { 9, 5, 100, 4, 89, 2, 0, 2, 89, 77, 77, 77 }; long B[] = new long[] { 0, 18, 60, 34, 50, 29, 4, 20, 48, 77, 2, 8 }; operations(n, A, B); }}
# Python program to find the values# strictly greater than the element# and present in the arrayfrom typing import Listfrom bisect import bisect_right # Function to find the values# strictly greater than the element# and present in the arraydef operations(n: int, A: List[int], B: List[int]) -> None: # Treeset to store the # values of the array A tree = set() # HashMap to store the frequencies # of the values in array A freqMap = dict() # Iterating through the array # and add values in the treeset for j in range(n): x = A[j] tree.add(x) if x not in freqMap: freqMap[x] = 0 freqMap[x] += 1 # Finding the strictly greater value # in the array A[] using "higher()" # function and also reducing the # frequency of that value because it # has to be used only once for j in range(n): x = B[j] # If the higher value exists sset = sorted(list(tree)) index = bisect_right(sset, x) if index < len(tree): print(sset[index], end=" ") # If the frequency value is 1 # then remove it from treeset # because it has been used # and its frequency becomes 0 if (freqMap[sset[index]] == 1): tree.remove(sset[index]) # Else, reducing the frequency # by 1 else: freqMap[sset[index]] -= 1 # If the value is not present # then print null else: print("null", end=" ") # Driver codeif __name__ == "__main__": n = 12 A = [9, 5, 100, 4, 89, 2, 0, 2, 89, 77, 77, 77] B = [0, 18, 60, 34, 50, 29, 4, 20, 48, 77, 2, 8] operations(n, A, B) # This code is contributed by sanjeev2552
2 77 77 77 89 89 5 100 null null 4 9
Time Complexity: O(N * log(N)) because the insertion of one element takes log(N) in a tree set.
Stream_Cipher
sanjeev2552
frequency-counting
Java-HashMap
java-treeset
Advanced Data Structure
Arrays
Hash
Tree
Arrays
Hash
Tree
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
Extendible Hashing (Dynamic approach to DBMS)
Ternary Search Tree
Proof that Dominant Set of a Graph is NP-Complete
2-3 Trees | (Search, Insert and Deletion)
Advantages of Trie Data Structure
Arrays in Java
Arrays in C/C++
Program for array rotation
Stack Data Structure (Introduction and Program)
Top 50 Array Coding Problems for Interviews
|
[
{
"code": null,
"e": 24071,
"s": 24043,
"text": "\n15 Feb, 2021"
},
{
"code": null,
"e": 24319,
"s": 24071,
"text": "Given two arrays A[] and B[] containing N elements, the task is to find, for every element in the array B[], the element which is just strictly greater than that element which is present in the array A[]. If no value is present, then print βnullβ."
},
{
"code": null,
"e": 24378,
"s": 24319,
"text": "Note: The value from the array A[] can only be used once. "
},
{
"code": null,
"e": 24390,
"s": 24378,
"text": "Examples: "
},
{
"code": null,
"e": 25051,
"s": 24390,
"text": "Input: A[] = {0, 1, 2, 3, 4}, B[] = {0, 1, 1, 2, 3} Output: 1 2 3 4 null Explanation: On iterating every element in the array B[]: The value which is strictly greater than 0 and present in the array A[] is 1. Similarly, the value which is strictly greater than 1 and present in the array A[] is 2. Similarly, the value which is strictly greater than 1 and present in the array A[] is 3 because 2 has already been used for the previous 1. Similarly, the value which is strictly greater than 2 and present in the array A[] is 4. Now, there is no value in the array which is greater than 3 because 4 has already been used for the previous 2. So, null is printed. "
},
{
"code": null,
"e": 25171,
"s": 25051,
"text": "Input: A[] = {0, 1, 6, 4, 0, 2, 4, 2, 4, 7}, B[] = {0, 1, 6, 4, 0, 2, 4, 2, 4, 7} Output: 1 2 7 6 2 4 null 4 null null "
},
{
"code": null,
"e": 25344,
"s": 25171,
"text": "Approach: The idea is to use the Tree set Data structure. But since a tree set doesnβt support duplicate values, a hashmap is used to store the frequency of the elements. "
},
{
"code": null,
"e": 25375,
"s": 25344,
"text": "Iterate through the array A[]."
},
{
"code": null,
"e": 25428,
"s": 25375,
"text": "Add the elements in the array A[] into the tree set."
},
{
"code": null,
"e": 25469,
"s": 25428,
"text": "Update their frequencies in the hashmap."
},
{
"code": null,
"e": 25622,
"s": 25469,
"text": "Now, for every element in the array B[], find the value which is strictly greater than the current value by using the higher() function of the tree set."
},
{
"code": null,
"e": 25685,
"s": 25622,
"text": "Now, reduce the frequency of this number in the hash map by 1."
},
{
"code": null,
"e": 25896,
"s": 25685,
"text": "Keep repeating the above two steps until the frequency of the numbers become 0. If it is 0, then all the occurrences of that number have been used up for the elements. So, remove that element from the tree set."
},
{
"code": null,
"e": 25948,
"s": 25896,
"text": "Below is the implementation of the above approach: "
},
{
"code": null,
"e": 25952,
"s": 25948,
"text": "C++"
},
{
"code": null,
"e": 25957,
"s": 25952,
"text": "Java"
},
{
"code": null,
"e": 25965,
"s": 25957,
"text": "Python3"
},
{
"code": "// C++ program to find the values// strictly greater than the element// and present in the array#include<bits/stdc++.h>using namespace std; // Function to find the values// strictly greater than the element// and present in the arrayvoid operations(int n, long long A[], long long B[]){ // Treeset to store the // values of the array A set<long long>tree; // HashMap to store the frequencies // of the values in array A map<long long, int>freqMap; // Iterating through the array // and add values in the treeset for(int j = 0; j < n; j++) { long long x = A[j]; tree.insert(x); freqMap[x]++; } // Finding the strictly greater value // in the array A[] using \"higher()\" // function and also reducing the // frequency of that value because it // has to be used only once for(int j = 0; j < n; j++) { long long x = B[j]; // If the higher value exists if (tree.upper_bound(x) != tree.end()) { cout << *tree.upper_bound(x) << \" \"; // If the frequency value is 1 // then remove it from treeset // because it has been used // and its frequency becomes 0 if (freqMap[*tree.upper_bound(x)] == 1) { tree.erase(*tree.upper_bound(x)); } // Else, reducing the frequency // by 1 else { freqMap[*tree.upper_bound(x)]--; } } // If the value is not present // then print null else { cout << \"null \"; } }} // Driver codeint main(){ int n = 12; long long A[] = { 9, 5, 100, 4, 89, 2, 0, 2, 89, 77, 77, 77 }; long long B[] = { 0, 18, 60, 34, 50, 29, 4, 20, 48, 77, 2, 8 }; operations(n, A, B);} // This code is contributed by Stream_Cipher",
"e": 27938,
"s": 25965,
"text": null
},
{
"code": "// Java program to find the values// strictly greater than the element// and present in the array import java.io.*;import java.util.*;public class GFG { // Function to find the values // strictly greater than the element // and present in the array public static void operations( int n, long A[], long B[]) { // Treeset to store the // values of the array A TreeSet<Long> tree = new TreeSet<Long>(); // HashMap to store the frequencies // of the values in array A HashMap<Long, Integer> freqMap = new HashMap<Long, Integer>(); // Iterating through the array // and add values in the treeset for (int j = 0; j < n; j++) { long x = A[j]; tree.add(x); // Updating the frequencies if (freqMap.containsKey(x)) { freqMap.put(x, freqMap.get(x) + 1); } else { freqMap.put(x, 1); } } // Finding the strictly greater value // in the array A[] using \"higher()\" // function and also reducing the // frequency of that value because it // has to be used only once for (int j = 0; j < n; j++) { long x = B[j]; // If the higher value exists if (tree.higher(x) != null) { System.out.print(tree.higher(x) + \" \"); // If the frequency value is 1 // then remove it from treeset // because it has been used // and its frequency becomes 0 if (freqMap.get(tree.higher(x)) == 1) { tree.remove(tree.higher(x)); } // Else, reducing the frequency // by 1 else { freqMap.put( tree.higher(x), freqMap.get(tree.higher(x)) - 1); } } // If the value is not present // then print null else { System.out.print(\"null \"); } } } // Driver code public static void main(String args[]) { int n = 12; long A[] = new long[] { 9, 5, 100, 4, 89, 2, 0, 2, 89, 77, 77, 77 }; long B[] = new long[] { 0, 18, 60, 34, 50, 29, 4, 20, 48, 77, 2, 8 }; operations(n, A, B); }}",
"e": 30491,
"s": 27938,
"text": null
},
{
"code": "# Python program to find the values# strictly greater than the element# and present in the arrayfrom typing import Listfrom bisect import bisect_right # Function to find the values# strictly greater than the element# and present in the arraydef operations(n: int, A: List[int], B: List[int]) -> None: # Treeset to store the # values of the array A tree = set() # HashMap to store the frequencies # of the values in array A freqMap = dict() # Iterating through the array # and add values in the treeset for j in range(n): x = A[j] tree.add(x) if x not in freqMap: freqMap[x] = 0 freqMap[x] += 1 # Finding the strictly greater value # in the array A[] using \"higher()\" # function and also reducing the # frequency of that value because it # has to be used only once for j in range(n): x = B[j] # If the higher value exists sset = sorted(list(tree)) index = bisect_right(sset, x) if index < len(tree): print(sset[index], end=\" \") # If the frequency value is 1 # then remove it from treeset # because it has been used # and its frequency becomes 0 if (freqMap[sset[index]] == 1): tree.remove(sset[index]) # Else, reducing the frequency # by 1 else: freqMap[sset[index]] -= 1 # If the value is not present # then print null else: print(\"null\", end=\" \") # Driver codeif __name__ == \"__main__\": n = 12 A = [9, 5, 100, 4, 89, 2, 0, 2, 89, 77, 77, 77] B = [0, 18, 60, 34, 50, 29, 4, 20, 48, 77, 2, 8] operations(n, A, B) # This code is contributed by sanjeev2552",
"e": 32247,
"s": 30491,
"text": null
},
{
"code": null,
"e": 32284,
"s": 32247,
"text": "2 77 77 77 89 89 5 100 null null 4 9"
},
{
"code": null,
"e": 32383,
"s": 32286,
"text": "Time Complexity: O(N * log(N)) because the insertion of one element takes log(N) in a tree set. "
},
{
"code": null,
"e": 32397,
"s": 32383,
"text": "Stream_Cipher"
},
{
"code": null,
"e": 32409,
"s": 32397,
"text": "sanjeev2552"
},
{
"code": null,
"e": 32428,
"s": 32409,
"text": "frequency-counting"
},
{
"code": null,
"e": 32441,
"s": 32428,
"text": "Java-HashMap"
},
{
"code": null,
"e": 32454,
"s": 32441,
"text": "java-treeset"
},
{
"code": null,
"e": 32478,
"s": 32454,
"text": "Advanced Data Structure"
},
{
"code": null,
"e": 32485,
"s": 32478,
"text": "Arrays"
},
{
"code": null,
"e": 32490,
"s": 32485,
"text": "Hash"
},
{
"code": null,
"e": 32495,
"s": 32490,
"text": "Tree"
},
{
"code": null,
"e": 32502,
"s": 32495,
"text": "Arrays"
},
{
"code": null,
"e": 32507,
"s": 32502,
"text": "Hash"
},
{
"code": null,
"e": 32512,
"s": 32507,
"text": "Tree"
},
{
"code": null,
"e": 32610,
"s": 32512,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 32619,
"s": 32610,
"text": "Comments"
},
{
"code": null,
"e": 32632,
"s": 32619,
"text": "Old Comments"
},
{
"code": null,
"e": 32678,
"s": 32632,
"text": "Extendible Hashing (Dynamic approach to DBMS)"
},
{
"code": null,
"e": 32698,
"s": 32678,
"text": "Ternary Search Tree"
},
{
"code": null,
"e": 32748,
"s": 32698,
"text": "Proof that Dominant Set of a Graph is NP-Complete"
},
{
"code": null,
"e": 32790,
"s": 32748,
"text": "2-3 Trees | (Search, Insert and Deletion)"
},
{
"code": null,
"e": 32824,
"s": 32790,
"text": "Advantages of Trie Data Structure"
},
{
"code": null,
"e": 32839,
"s": 32824,
"text": "Arrays in Java"
},
{
"code": null,
"e": 32855,
"s": 32839,
"text": "Arrays in C/C++"
},
{
"code": null,
"e": 32882,
"s": 32855,
"text": "Program for array rotation"
},
{
"code": null,
"e": 32930,
"s": 32882,
"text": "Stack Data Structure (Introduction and Program)"
}
] |
Perl - Regular Expressions
|
A regular expression is a string of characters that defines the pattern or patterns you are searching for. The syntax of regular expressions in Perl is very similar to what you will find within other regular-expression-supporting programs, such as sed, grep, and awk.
The basic method for applying a regular expression is to use the pattern binding operators =~ and !~. The first operator is a test and assignment operator.
There are three regular expression operators within Perl.
Match Regular Expression - m//
Substitute Regular Expression - s///
Transliterate Regular Expression - tr///
The forward slashes in each case act as delimiters for the regular expression (regex) that you are specifying. If you are comfortable with any other delimiter, then you can use in place of forward slash.
The match operator, m//, is used to match a string or statement to a regular expression. For example, to match the character sequence "foo" against the scalar $bar, you might use a statement like this —
#!/usr/bin/perl
$bar = "This is foo and again foo";
if ($bar =~ /foo/) {
print "First time is matching\n";
} else {
print "First time is not matching\n";
}
$bar = "foo";
if ($bar =~ /foo/) {
print "Second time is matching\n";
} else {
print "Second time is not matching\n";
}
When the above program is executed, it produces the following result —
First time is matching
Second time is matching
The m// actually works in the same fashion as the q// operator series. You can use any combination of naturally matching characters to act as delimiters for the expression. For example, m{}, m(), and m<> are all valid. So the above example can be re-written as follows —
#!/usr/bin/perl
$bar = "This is foo and again foo";
if ($bar =~ m[foo]) {
print "First time is matching\n";
} else {
print "First time is not matching\n";
}
$bar = "foo";
if ($bar =~ m{foo}) {
print "Second time is matching\n";
} else {
print "Second time is not matching\n";
}
You can omit m from m// if the delimiters are forward slashes, but for all other delimiters you must use the m prefix.
Note that the entire match expression, that is the expression on the left of =~ or !~ and the match operator, returns true (in a scalar context) if the expression matches. Therefore the statement —
$true = ($foo =~ m/foo/);
will set $true to 1 if $foo matches the regex, or 0 if the match fails. In a list context, the match returns the contents of any grouped expressions. For example, when extracting the hours, minutes, and seconds from a time string, we can use —
my ($hours, $minutes, $seconds) = ($time =~ m/(\d+):(\d+):(\d+)/);
The match operator supports its own set of modifiers. The /g modifier allows for global matching. The /i modifier will make the match case insensitive. Here is the complete list of modifiers
i
Makes the match case insensitive.
m
Specifies that if the string has newline or carriage return characters, the ^ and $ operators will now match against a newline boundary, instead of a string boundary.
o
Evaluates the expression only once.
s
Allows use of . to match a newline character.
x
Allows you to use white space in the expression for clarity.
g
Globally finds all matches.
cg
Allows the search to continue even after a global match fails.
There is also a simpler version of the match operator - the ?PATTERN? operator. This is basically identical to the m// operator except that it only matches once within the string you are searching between each call to reset.
For example, you can use this to get the first and last elements within a list β
#!/usr/bin/perl
@list = qw/food foosball subeo footnote terfoot canic footbrdige/;
foreach (@list) {
$first = $1 if /(foo.*?)/;
$last = $1 if /(foo.*)/;
}
print "First: $first, Last: $last\n";
When the above program is executed, it produces the following result —
First: foo, Last: footbrdige
Regular expression variables include $+, which contains whatever the last grouping match matched; $&, which contains the entire matched string; $`, which contains everything before the matched string; and $', which contains everything after the matched string. The following code demonstrates the result —
#!/usr/bin/perl
$string = "The food is in the salad bar";
$string =~ m/foo/;
print "Before: $`\n";
print "Matched: $&\n";
print "After: $'\n";
When the above program is executed, it produces the following result —
Before: The
Matched: foo
After: d is in the salad bar
The substitution operator, s///, is really just an extension of the match operator that allows you to replace the text matched with some new text. The basic form of the operator is —
s/PATTERN/REPLACEMENT/;
The PATTERN is the regular expression for the text that we are looking for. The REPLACEMENT is a specification for the text or regular expression that we want to use to replace the found text with. For example, we can replace all occurrences of cat with dog using the following regular expression —
#/user/bin/perl
$string = "The cat sat on the mat";
$string =~ s/cat/dog/;
print "$string\n";
When the above program is executed, it produces the following result —
The dog sat on the mat
Here is the list of all the modifiers used with substitution operator.
i
Makes the match case insensitive.
m
Specifies that if the string has newline or carriage return characters, the ^ and $ operators will now match against a newline boundary, instead of a string boundary.
o
Evaluates the expression only once.
s
Allows use of . to match a newline character.
x
Allows you to use white space in the expression for clarity.
g
Replaces all occurrences of the found expression with the replacement text.
e
Evaluates the replacement as if it were a Perl statement, and uses its return value as the replacement text.
Translation is similar, but not identical, to the principles of substitution, but unlike substitution, translation (or transliteration) does not use regular expressions for its search or replacement values. The translation operators are —
tr/SEARCHLIST/REPLACEMENTLIST/cds
y/SEARCHLIST/REPLACEMENTLIST/cds
The translation replaces all occurrences of the characters in SEARCHLIST with the corresponding characters in REPLACEMENTLIST. For example, using the "The cat sat on the mat." string we have been using in this chapter —
#/user/bin/perl
$string = 'The cat sat on the mat';
$string =~ tr/a/o/;
print "$string\n";
When the above program is executed, it produces the following result —
The cot sot on the mot.
Standard Perl ranges can also be used, allowing you to specify ranges of characters either by letter or numerical value. To change the case of the string, you might use the following syntax in place of the uc function.
$string =~ tr/a-z/A-Z/;
Following is the list of operators related to translation.
c
Complements SEARCHLIST.
d
Deletes found but unreplaced characters.
s
Squashes duplicate replaced characters.
The /d modifier deletes the characters matching SEARCHLIST that do not have a corresponding entry in REPLACEMENTLIST. For example —
#!/usr/bin/perl
$string = 'the cat sat on the mat.';
$string =~ tr/a-z/b/d;
print "$string\n";
When the above program is executed, it produces the following result —
b b b.
The last modifier, /s, removes the duplicate sequences of characters that were replaced, so —
#!/usr/bin/perl
$string = 'food';
$string = 'food';
$string =~ tr/a-z/a-z/s;
print "$string\n";
When the above program is executed, it produces the following result —
fod
You don't just have to match on fixed strings. In fact, you can match on just about anything you could dream of by using more complex regular expressions. Here's a quick cheat sheet —
The following table lists the regular expression syntax that is available in Perl.
^
Matches beginning of line.
$
Matches end of line.
.
Matches any single character except newline. Using m option allows it to match newline as well.
[...]
Matches any single character in brackets.
[^...]
Matches any single character not in brackets.
*
Matches 0 or more occurrences of preceding expression.
+
Matches 1 or more occurrence of preceding expression.
?
Matches 0 or 1 occurrence of preceding expression.
{ n}
Matches exactly n number of occurrences of preceding expression.
{ n,}
Matches n or more occurrences of preceding expression.
{ n, m}
Matches at least n and at most m occurrences of preceding expression.
a| b
Matches either a or b.
\w
Matches word characters.
\W
Matches nonword characters.
\s
Matches whitespace. Equivalent to [\t\n\r\f].
\S
Matches nonwhitespace.
\d
Matches digits. Equivalent to [0-9].
\D
Matches nondigits.
\A
Matches beginning of string.
\Z
Matches end of string. If a newline exists, it matches just before newline.
\z
Matches end of string.
\G
Matches point where last match finished.
\b
Matches word boundaries when outside brackets. Matches backspace (0x08) when inside brackets.
\B
Matches nonword boundaries.
\n, \t, etc.
Matches newlines, carriage returns, tabs, etc.
\1...\9
Matches nth grouped subexpression.
\10
Matches nth grouped subexpression if it matched already. Otherwise refers to the octal representation of a character code.
[aeiou]
Matches a single character in the given set
[^aeiou]
Matches a single character outside the given set
The ^ metacharacter matches the beginning of the string and the $ metasymbol matches the end of the string. Here are some brief examples.
# nothing in the string (start and end are adjacent)
/^$/
# three digits, each followed by a whitespace
# character (eg "3 4 5 ")
/(\d\s){3}/
# matches a string in which every
# odd-numbered letter is a (eg "abacadaf")
/(a.)+/
# string starts with one or more digits
/^\d+/
# string that ends with one or more digits
/\d+$/
Lets have a look at another example.
#!/usr/bin/perl
$string = "Cats go Catatonic\nWhen given Catnip";
($start) = ($string =~ /\A(.*?) /);
@lines = $string =~ /^(.*?) /gm;
print "First word: $start\n","Line starts: @lines\n";
When the above program is executed, it produces the following result —
First word: Cats
Line starts: Cats When
The \b matches at any word boundary, as defined by the difference between the \w class and the \W class. Because \w includes the characters for a word, and \W the opposite, this normally means the termination of a word. The \B assertion matches any position that is not a word boundary. For example —
/\bcat\b/ # Matches 'the cat sat' but not 'cat on the mat'
/\Bcat\B/ # Matches 'verification' but not 'the cat on the mat'
/\bcat\B/ # Matches 'catatonic' but not 'polecat'
/\Bcat\b/ # Matches 'polecat' but not 'catatonic'
The | character is just like the standard or bitwise OR within Perl. It specifies alternate matches within a regular expression or group. For example, to match "cat" or "dog" in an expression, you might use this —
if ($string =~ /cat|dog/)
You can group individual elements of an expression together in order to support complex matches. Searching for two people's names could be achieved with two separate tests, like this —
if (($string =~ /Martin Brown/) || ($string =~ /Sharon Brown/))
This could be written as follows
if ($string =~ /(Martin|Sharon) Brown/)
From a regular-expression point of view, there is no difference between the two except, perhaps, that the former is slightly clearer.
$string =~ /(\S+)\s+(\S+)/;
and
$string =~ /\S+\s+\S+/;
However, the benefit of grouping is that it allows us to extract a sequence from a regular expression. Groupings are returned as a list in the order in which they appear in the original. For example, in the following fragment we have pulled out the hours, minutes, and seconds from a string.
my ($hours, $minutes, $seconds) = ($time =~ m/(\d+):(\d+):(\d+)/);
As well as this direct method, matched groups are also available within the special $x variables, where x is the number of the group within the regular expression. We could therefore rewrite the preceding example as follows —
#!/usr/bin/perl
$time = "12:05:30";
$time =~ m/(\d+):(\d+):(\d+)/;
my ($hours, $minutes, $seconds) = ($1, $2, $3);
print "Hours : $hours, Minutes: $minutes, Second: $seconds\n";
When the above program is executed, it produces the following result —
Hours : 12, Minutes: 05, Second: 30
When groups are used in substitution expressions, the $x syntax can be used in the replacement text. Thus, we could reformat a date string using this —
#!/usr/bin/perl
$date = '03/26/1999';
$date =~ s#(\d+)/(\d+)/(\d+)#$3/$1/$2#;
print "$date\n";
When the above program is executed, it produces the following result —
1999/03/26
The \G assertion allows you to continue searching from the point where the last match occurred. For example, in the following code, we have used \G so that we can search to the correct position and then extract some information, without having to create a more complex, single regular expression —
#!/usr/bin/perl
$string = "The time is: 12:31:02 on 4/12/00";
$string =~ /:\s+/g;
($time) = ($string =~ /\G(\d+:\d+:\d+)/);
$string =~ /.+\s+/g;
($date) = ($string =~ m{\G(\d+/\d+/\d+)});
print "Time: $time, Date: $date\n";
When the above program is executed, it produces the following result —
Time: 12:31:02, Date: 4/12/00
The \G assertion is actually just the metasymbol equivalent of the pos function, so between regular expression calls you can continue to use pos, and even modify the value of pos (and therefore \G) by using pos as an lvalue subroutine.
Perl
Match "Perl".
[Pp]ython
Matches "Python" or "python"
rub[ye]
Matches "ruby" or "rube"
[aeiou]
Matches any one lowercase vowel
[0-9]
Matches any digit; same as [0123456789]
[a-z]
Matches any lowercase ASCII letter
[A-Z]
Matches any uppercase ASCII letter
[a-zA-Z0-9]
Matches any of the above
[^aeiou]
Matches anything other than a lowercase vowel
[^0-9]
Matches anything other than a digit
.
Matches any character except newline
\d
Matches a digit: [0-9]
\D
Matches a nondigit: [^0-9]
\s
Matches a whitespace character: [ \t\r\n\f]
\S
Matches nonwhitespace: [^ \t\r\n\f]
\w
Matches a single word character: [A-Za-z0-9_]
\W
Matches a nonword character: [^A-Za-z0-9_]
ruby?
Matches "rub" or "ruby": the y is optional
ruby*
Matches "rub" plus 0 or more ys
ruby+
Matches "rub" plus 1 or more ys
\d{3}
Matches exactly 3 digits
\d{3,}
Matches 3 or more digits
\d{3,5}
Matches 3, 4, or 5 digits
This matches the smallest number of repetitions —
<.*>
Greedy repetition: matches "<python>perl>"
<.*?>
Nongreedy: matches "<python>" in "<python>perl>"
\D\d+
No group: + repeats \d
(\D\d)+
Grouped: + repeats \D\d pair
([Pp]ython(, )?)+
Match "Python", "Python, python, python", etc.
This matches a previously matched group again —
([Pp])ython&\1ails
Matches python&pails or Python&Pails
(['"])[^\1]*\1
Single or double-quoted string. \1 matches whatever the 1st group matched. \2 matches whatever the 2nd group matched, etc.
python|perl
Matches "python" or "perl"
rub(y|le)
Matches "ruby" or "ruble"
Python(!+|\?)
"Python" followed by one or more ! or one ?
These specify match positions.
^Python
Matches "Python" at the start of a string or internal line
Python$
Matches "Python" at the end of a string or line
\APython
Matches "Python" at the start of a string
Python\Z
Matches "Python" at the end of a string
\bPython\b
Matches "Python" at a word boundary
\brub\B
\B is nonword boundary: match "rub" in "rube" and "ruby" but not alone
Python(?=!)
Matches "Python", if followed by an exclamation point
Python(?!!)
Matches "Python", if not followed by an exclamation point
R(?#comment)
Matches "R". All the rest is a comment
R(?i)uby
Case-insensitive while matching "uby"
R(?i:uby)
Same as above
rub(?:y|le)
Group only without creating \1 backreference
46 Lectures
4.5 hours
Devi Killada
11 Lectures
1.5 hours
Harshit Srivastava
30 Lectures
6 hours
TELCOMA Global
24 Lectures
2 hours
Mohammad Nauman
68 Lectures
7 hours
Stone River ELearning
58 Lectures
6.5 hours
Stone River ELearning
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2482,
"s": 2220,
"text": "A regular expression is a string of characters that defines the pattern or patterns you are viewing. The syntax of regular expressions in Perl is very similar to what you will find within other regular expression.supporting programs, such as sed, grep, and awk."
},
{
"code": null,
"e": 2638,
"s": 2482,
"text": "The basic method for applying a regular expression is to use the pattern binding operators =~ and !~. The first operator is a test and assignment operator."
},
{
"code": null,
"e": 2696,
"s": 2638,
"text": "There are three regular expression operators within Perl."
},
{
"code": null,
"e": 2727,
"s": 2696,
"text": "Match Regular Expression - m//"
},
{
"code": null,
"e": 2764,
"s": 2727,
"text": "Substitute Regular Expression - s///"
},
{
"code": null,
"e": 2805,
"s": 2764,
"text": "Transliterate Regular Expression - tr///"
},
{
"code": null,
"e": 3009,
"s": 2805,
"text": "The forward slashes in each case act as delimiters for the regular expression (regex) that you are specifying. If you are comfortable with any other delimiter, then you can use in place of forward slash."
},
{
"code": null,
"e": 3212,
"s": 3009,
"text": "The match operator, m//, is used to match a string or statement to a regular expression. For example, to match the character sequence \"foo\" against the scalar $bar, you might use a statement like this β"
},
{
"code": null,
"e": 3502,
"s": 3212,
"text": "#!/usr/bin/perl\n\n$bar = \"This is foo and again foo\";\nif ($bar =~ /foo/) {\n print \"First time is matching\\n\";\n} else {\n print \"First time is not matching\\n\";\n}\n\n$bar = \"foo\";\nif ($bar =~ /foo/) {\n print \"Second time is matching\\n\";\n} else {\n print \"Second time is not matching\\n\";\n}"
},
{
"code": null,
"e": 3569,
"s": 3502,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 3617,
"s": 3569,
"text": "First time is matching\nSecond time is matching\n"
},
{
"code": null,
"e": 3883,
"s": 3617,
"text": "The m// actually works in the same fashion as the q// operator series.you can use any combination of naturally matching characters to act as delimiters for the expression. For example, m{}, m(), and m>< are all valid. So above example can be re-written as follows β"
},
{
"code": null,
"e": 4175,
"s": 3883,
"text": "#!/usr/bin/perl\n\n$bar = \"This is foo and again foo\";\nif ($bar =~ m[foo]) {\n print \"First time is matching\\n\";\n} else {\n print \"First time is not matching\\n\";\n}\n\n$bar = \"foo\";\nif ($bar =~ m{foo}) {\n print \"Second time is matching\\n\";\n} else {\n print \"Second time is not matching\\n\";\n}"
},
{
"code": null,
"e": 4294,
"s": 4175,
"text": "You can omit m from m// if the delimiters are forward slashes, but for all other delimiters you must use the m prefix."
},
{
"code": null,
"e": 4492,
"s": 4294,
"text": "Note that the entire match expression, that is the expression on the left of =~ or !~ and the match operator, returns true (in a scalar context) if the expression matches. Therefore the statement β"
},
{
"code": null,
"e": 4518,
"s": 4492,
"text": "$true = ($foo =~ m/foo/);"
},
{
"code": null,
"e": 4762,
"s": 4518,
"text": "will set $true to 1 if $foo matches the regex, or 0 if the match fails. In a list context, the match returns the contents of any grouped expressions. For example, when extracting the hours, minutes, and seconds from a time string, we can use β"
},
{
"code": null,
"e": 4829,
"s": 4762,
"text": "my ($hours, $minutes, $seconds) = ($time =~ m/(\\d+):(\\d+):(\\d+)/);"
},
{
"code": null,
"e": 5020,
"s": 4829,
"text": "The match operator supports its own set of modifiers. The /g modifier allows for global matching. The /i modifier will make the match case insensitive. Here is the complete list of modifiers"
},
{
"code": null,
"e": 5022,
"s": 5020,
"text": "i"
},
{
"code": null,
"e": 5056,
"s": 5022,
"text": "Makes the match case insensitive."
},
{
"code": null,
"e": 5058,
"s": 5056,
"text": "m"
},
{
"code": null,
"e": 5225,
"s": 5058,
"text": "Specifies that if the string has newline or carriage return characters, the ^ and $ operators will now match against a newline boundary, instead of a string boundary."
},
{
"code": null,
"e": 5227,
"s": 5225,
"text": "o"
},
{
"code": null,
"e": 5263,
"s": 5227,
"text": "Evaluates the expression only once."
},
{
"code": null,
"e": 5265,
"s": 5263,
"text": "s"
},
{
"code": null,
"e": 5311,
"s": 5265,
"text": "Allows use of . to match a newline character."
},
{
"code": null,
"e": 5313,
"s": 5311,
"text": "x"
},
{
"code": null,
"e": 5374,
"s": 5313,
"text": "Allows you to use white space in the expression for clarity."
},
{
"code": null,
"e": 5376,
"s": 5374,
"text": "g"
},
{
"code": null,
"e": 5404,
"s": 5376,
"text": "Globally finds all matches."
},
{
"code": null,
"e": 5407,
"s": 5404,
"text": "cg"
},
{
"code": null,
"e": 5470,
"s": 5407,
"text": "Allows the search to continue even after a global match fails."
},
{
"code": null,
"e": 5695,
"s": 5470,
"text": "There is also a simpler version of the match operator - the ?PATTERN? operator. This is basically identical to the m// operator except that it only matches once within the string you are searching between each call to reset."
},
{
"code": null,
"e": 5776,
"s": 5695,
"text": "For example, you can use this to get the first and last elements within a list β"
},
{
"code": null,
"e": 5977,
"s": 5776,
"text": "#!/usr/bin/perl\n\n@list = qw/food foosball subeo footnote terfoot canic footbrdige/;\n\nforeach (@list) {\n $first = $1 if /(foo.*?)/;\n $last = $1 if /(foo.*)/;\n}\nprint \"First: $first, Last: $last\\n\";"
},
{
"code": null,
"e": 6044,
"s": 5977,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 6074,
"s": 6044,
"text": "First: foo, Last: footbrdige\n"
},
{
"code": null,
"e": 6375,
"s": 6074,
"text": "Regular expression variables include $, which contains whatever the last grouping match matched; $&, which contains the entire matched string; $`, which contains everything before the matched string; and $', which contains everything after the matched string. Following code demonstrates the result β"
},
{
"code": null,
"e": 6519,
"s": 6375,
"text": "#!/usr/bin/perl\n\n$string = \"The food is in the salad bar\";\n$string =~ m/foo/;\nprint \"Before: $`\\n\";\nprint \"Matched: $&\\n\";\nprint \"After: $'\\n\";"
},
{
"code": null,
"e": 6586,
"s": 6519,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 6641,
"s": 6586,
"text": "Before: The\nMatched: foo\nAfter: d is in the salad bar\n"
},
{
"code": null,
"e": 6824,
"s": 6641,
"text": "The substitution operator, s///, is really just an extension of the match operator that allows you to replace the text matched with some new text. The basic form of the operator is β"
},
{
"code": null,
"e": 6849,
"s": 6824,
"text": "s/PATTERN/REPLACEMENT/;\n"
},
{
"code": null,
"e": 7148,
"s": 6849,
"text": "The PATTERN is the regular expression for the text that we are looking for. The REPLACEMENT is a specification for the text or regular expression that we want to use to replace the found text with. For example, we can replace all occurrences of dog with cat using the following regular expression β"
},
{
"code": null,
"e": 7244,
"s": 7148,
"text": "#/user/bin/perl\n\n$string = \"The cat sat on the mat\";\n$string =~ s/cat/dog/;\n\nprint \"$string\\n\";"
},
{
"code": null,
"e": 7311,
"s": 7244,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 7335,
"s": 7311,
"text": "The dog sat on the mat\n"
},
{
"code": null,
"e": 7406,
"s": 7335,
"text": "Here is the list of all the modifiers used with substitution operator."
},
{
"code": null,
"e": 7408,
"s": 7406,
"text": "i"
},
{
"code": null,
"e": 7442,
"s": 7408,
"text": "Makes the match case insensitive."
},
{
"code": null,
"e": 7444,
"s": 7442,
"text": "m"
},
{
"code": null,
"e": 7611,
"s": 7444,
"text": "Specifies that if the string has newline or carriage return characters, the ^ and $ operators will now match against a newline boundary, instead of a string boundary."
},
{
"code": null,
"e": 7613,
"s": 7611,
"text": "o"
},
{
"code": null,
"e": 7649,
"s": 7613,
"text": "Evaluates the expression only once."
},
{
"code": null,
"e": 7651,
"s": 7649,
"text": "s"
},
{
"code": null,
"e": 7697,
"s": 7651,
"text": "Allows use of . to match a newline character."
},
{
"code": null,
"e": 7699,
"s": 7697,
"text": "x"
},
{
"code": null,
"e": 7760,
"s": 7699,
"text": "Allows you to use white space in the expression for clarity."
},
{
"code": null,
"e": 7762,
"s": 7760,
"text": "g"
},
{
"code": null,
"e": 7838,
"s": 7762,
"text": "Replaces all occurrences of the found expression with the replacement text."
},
{
"code": null,
"e": 7840,
"s": 7838,
"text": "e"
},
{
"code": null,
"e": 7949,
"s": 7840,
"text": "Evaluates the replacement as if it were a Perl statement, and uses its return value as the replacement text."
},
{
"code": null,
"e": 8188,
"s": 7949,
"text": "Translation is similar, but not identical, to the principles of substitution, but unlike substitution, translation (or transliteration) does not use regular expressions for its search on replacement values. The translation operators are β"
},
{
"code": null,
"e": 8255,
"s": 8188,
"text": "tr/SEARCHLIST/REPLACEMENTLIST/cds\ny/SEARCHLIST/REPLACEMENTLIST/cds"
},
{
"code": null,
"e": 8475,
"s": 8255,
"text": "The translation replaces all occurrences of the characters in SEARCHLIST with the corresponding characters in REPLACEMENTLIST. For example, using the \"The cat sat on the mat.\" string we have been using in this chapter β"
},
{
"code": null,
"e": 8568,
"s": 8475,
"text": "#/user/bin/perl\n\n$string = 'The cat sat on the mat';\n$string =~ tr/a/o/;\n\nprint \"$string\\n\";"
},
{
"code": null,
"e": 8635,
"s": 8568,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 8660,
"s": 8635,
"text": "The cot sot on the mot.\n"
},
{
"code": null,
"e": 8879,
"s": 8660,
"text": "Standard Perl ranges can also be used, allowing you to specify ranges of characters either by letter or numerical value. To change the case of the string, you might use the following syntax in place of the uc function."
},
{
"code": null,
"e": 8903,
"s": 8879,
"text": "$string =~ tr/a-z/A-Z/;"
},
{
"code": null,
"e": 8962,
"s": 8903,
"text": "Following is the list of operators related to translation."
},
{
"code": null,
"e": 8964,
"s": 8962,
"text": "c"
},
{
"code": null,
"e": 8988,
"s": 8964,
"text": "Complements SEARCHLIST."
},
{
"code": null,
"e": 8990,
"s": 8988,
"text": "d"
},
{
"code": null,
"e": 9031,
"s": 8990,
"text": "Deletes found but unreplaced characters."
},
{
"code": null,
"e": 9033,
"s": 9031,
"text": "s"
},
{
"code": null,
"e": 9073,
"s": 9033,
"text": "Squashes duplicate replaced characters."
},
{
"code": null,
"e": 9205,
"s": 9073,
"text": "The /d modifier deletes the characters matching SEARCHLIST that do not have a corresponding entry in REPLACEMENTLIST. For example β"
},
{
"code": null,
"e": 9303,
"s": 9205,
"text": "#!/usr/bin/perl \n\n$string = 'the cat sat on the mat.';\n$string =~ tr/a-z/b/d;\n\nprint \"$string\\n\";"
},
{
"code": null,
"e": 9370,
"s": 9303,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 9380,
"s": 9370,
"text": "b b b.\n"
},
{
"code": null,
"e": 9474,
"s": 9380,
"text": "The last modifier, /s, removes the duplicate sequences of characters that were replaced, so β"
},
{
"code": null,
"e": 9572,
"s": 9474,
"text": "#!/usr/bin/perl\n\n$string = 'food';\n$string = 'food';\n$string =~ tr/a-z/a-z/s;\n\nprint \"$string\\n\";"
},
{
"code": null,
"e": 9639,
"s": 9572,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 9644,
"s": 9639,
"text": "fod\n"
},
{
"code": null,
"e": 9828,
"s": 9644,
"text": "You don't just have to match on fixed strings. In fact, you can match on just about anything you could dream of by using more complex regular expressions. Here's a quick cheat sheet β"
},
{
"code": null,
"e": 9909,
"s": 9828,
"text": "Following table lists the regular expression syntax that is available in Python."
},
{
"code": null,
"e": 9911,
"s": 9909,
"text": "^"
},
{
"code": null,
"e": 9938,
"s": 9911,
"text": "Matches beginning of line."
},
{
"code": null,
"e": 9940,
"s": 9938,
"text": "$"
},
{
"code": null,
"e": 9961,
"s": 9940,
"text": "Matches end of line."
},
{
"code": null,
"e": 9963,
"s": 9961,
"text": "."
},
{
"code": null,
"e": 10059,
"s": 9963,
"text": "Matches any single character except newline. Using m option allows it to match newline as well."
},
{
"code": null,
"e": 10065,
"s": 10059,
"text": "[...]"
},
{
"code": null,
"e": 10107,
"s": 10065,
"text": "Matches any single character in brackets."
},
{
"code": null,
"e": 10114,
"s": 10107,
"text": "[^...]"
},
{
"code": null,
"e": 10160,
"s": 10114,
"text": "Matches any single character not in brackets."
},
{
"code": null,
"e": 10162,
"s": 10160,
"text": "*"
},
{
"code": null,
"e": 10217,
"s": 10162,
"text": "Matches 0 or more occurrences of preceding expression."
},
{
"code": null,
"e": 10219,
"s": 10217,
"text": "+"
},
{
"code": null,
"e": 10273,
"s": 10219,
"text": "Matches 1 or more occurrence of preceding expression."
},
{
"code": null,
"e": 10275,
"s": 10273,
"text": "?"
},
{
"code": null,
"e": 10326,
"s": 10275,
"text": "Matches 0 or 1 occurrence of preceding expression."
},
{
"code": null,
"e": 10331,
"s": 10326,
"text": "{ n}"
},
{
"code": null,
"e": 10396,
"s": 10331,
"text": "Matches exactly n number of occurrences of preceding expression."
},
{
"code": null,
"e": 10402,
"s": 10396,
"text": "{ n,}"
},
{
"code": null,
"e": 10457,
"s": 10402,
"text": "Matches n or more occurrences of preceding expression."
},
{
"code": null,
"e": 10465,
"s": 10457,
"text": "{ n, m}"
},
{
"code": null,
"e": 10535,
"s": 10465,
"text": "Matches at least n and at most m occurrences of preceding expression."
},
{
"code": null,
"e": 10540,
"s": 10535,
"text": "a| b"
},
{
"code": null,
"e": 10563,
"s": 10540,
"text": "Matches either a or b."
},
{
"code": null,
"e": 10566,
"s": 10563,
"text": "\\w"
},
{
"code": null,
"e": 10591,
"s": 10566,
"text": "Matches word characters."
},
{
"code": null,
"e": 10594,
"s": 10591,
"text": "\\W"
},
{
"code": null,
"e": 10622,
"s": 10594,
"text": "Matches nonword characters."
},
{
"code": null,
"e": 10625,
"s": 10622,
"text": "\\s"
},
{
"code": null,
"e": 10671,
"s": 10625,
"text": "Matches whitespace. Equivalent to [\\t\\n\\r\\f]."
},
{
"code": null,
"e": 10674,
"s": 10671,
"text": "\\S"
},
{
"code": null,
"e": 10697,
"s": 10674,
"text": "Matches nonwhitespace."
},
{
"code": null,
"e": 10700,
"s": 10697,
"text": "\\d"
},
{
"code": null,
"e": 10737,
"s": 10700,
"text": "Matches digits. Equivalent to [0-9]."
},
{
"code": null,
"e": 10740,
"s": 10737,
"text": "\\D"
},
{
"code": null,
"e": 10759,
"s": 10740,
"text": "Matches nondigits."
},
{
"code": null,
"e": 10762,
"s": 10759,
"text": "\\A"
},
{
"code": null,
"e": 10791,
"s": 10762,
"text": "Matches beginning of string."
},
{
"code": null,
"e": 10794,
"s": 10791,
"text": "\\Z"
},
{
"code": null,
"e": 10870,
"s": 10794,
"text": "Matches end of string. If a newline exists, it matches just before newline."
},
{
"code": null,
"e": 10873,
"s": 10870,
"text": "\\z"
},
{
"code": null,
"e": 10896,
"s": 10873,
"text": "Matches end of string."
},
{
"code": null,
"e": 10899,
"s": 10896,
"text": "\\G"
},
{
"code": null,
"e": 10940,
"s": 10899,
"text": "Matches point where last match finished."
},
{
"code": null,
"e": 10943,
"s": 10940,
"text": "\\b"
},
{
"code": null,
"e": 11037,
"s": 10943,
"text": "Matches word boundaries when outside brackets. Matches backspace (0x08) when inside brackets."
},
{
"code": null,
"e": 11040,
"s": 11037,
"text": "\\B"
},
{
"code": null,
"e": 11068,
"s": 11040,
"text": "Matches nonword boundaries."
},
{
"code": null,
"e": 11081,
"s": 11068,
"text": "\\n, \\t, etc."
},
{
"code": null,
"e": 11128,
"s": 11081,
"text": "Matches newlines, carriage returns, tabs, etc."
},
{
"code": null,
"e": 11136,
"s": 11128,
"text": "\\1...\\9"
},
{
"code": null,
"e": 11171,
"s": 11136,
"text": "Matches nth grouped subexpression."
},
{
"code": null,
"e": 11175,
"s": 11171,
"text": "\\10"
},
{
"code": null,
"e": 11298,
"s": 11175,
"text": "Matches nth grouped subexpression if it matched already. Otherwise refers to the octal representation of a character code."
},
{
"code": null,
"e": 11306,
"s": 11298,
"text": "[aeiou]"
},
{
"code": null,
"e": 11350,
"s": 11306,
"text": "Matches a single character in the given set"
},
{
"code": null,
"e": 11359,
"s": 11350,
"text": "[^aeiou]"
},
{
"code": null,
"e": 11408,
"s": 11359,
"text": "Matches a single character outside the given set"
},
{
"code": null,
"e": 11546,
"s": 11408,
"text": "The ^ metacharacter matches the beginning of the string and the $ metasymbol matches the end of the string. Here are some brief examples."
},
{
"code": null,
"e": 11884,
"s": 11546,
"text": "# nothing in the string (start and end are adjacent)\n/^$/ \n\n# a three digits, each followed by a whitespace\n# character (eg \"3 4 5 \")\n/(\\d\\s) {3}/ \n\n# matches a string in which every\n# odd-numbered letter is a (eg \"abacadaf\")\n/(a.)+/ \n\n# string starts with one or more digits\n/^\\d+/\n\n# string that ends with one or more digits\n/\\d+$/"
},
{
"code": null,
"e": 11921,
"s": 11884,
"text": "Lets have a look at another example."
},
{
"code": null,
"e": 12111,
"s": 11921,
"text": "#!/usr/bin/perl\n\n$string = \"Cats go Catatonic\\nWhen given Catnip\";\n($start) = ($string =~ /\\A(.*?) /);\n@lines = $string =~ /^(.*?) /gm;\nprint \"First word: $start\\n\",\"Line starts: @lines\\n\";"
},
{
"code": null,
"e": 12178,
"s": 12111,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 12219,
"s": 12178,
"text": "First word: Cats\nLine starts: Cats When\n"
},
{
"code": null,
"e": 12520,
"s": 12219,
"text": "The \\b matches at any word boundary, as defined by the difference between the \\w class and the \\W class. Because \\w includes the characters for a word, and \\W the opposite, this normally means the termination of a word. The \\B assertion matches any position that is not a word boundary. For example β"
},
{
"code": null,
"e": 12743,
"s": 12520,
"text": "/\\bcat\\b/ # Matches 'the cat sat' but not 'cat on the mat'\n/\\Bcat\\B/ # Matches 'verification' but not 'the cat on the mat'\n/\\bcat\\B/ # Matches 'catatonic' but not 'polecat'\n/\\Bcat\\b/ # Matches 'polecat' but not 'catatonic'"
},
{
"code": null,
"e": 12957,
"s": 12743,
"text": "The | character is just like the standard or bitwise OR within Perl. It specifies alternate matches within a regular expression or group. For example, to match \"cat\" or \"dog\" in an expression, you might use this β"
},
{
"code": null,
"e": 12983,
"s": 12957,
"text": "if ($string =~ /cat|dog/)"
},
{
"code": null,
"e": 13168,
"s": 12983,
"text": "You can group individual elements of an expression together in order to support complex matches. Searching for two peopleβs names could be achieved with two separate tests, like this β"
},
{
"code": null,
"e": 13308,
"s": 13168,
"text": "if (($string =~ /Martin Brown/) || ($string =~ /Sharon Brown/))\n\nThis could be written as follows\n\nif ($string =~ /(Martin|Sharon) Brown/)"
},
{
"code": null,
"e": 13434,
"s": 13308,
"text": "From a regular-expression point of view, there is no difference between except, perhaps, that the former is slightly clearer."
},
{
"code": null,
"e": 13493,
"s": 13434,
"text": "$string =~ /(\\S+)\\s+(\\S+)/;\n\nand \n\n$string =~ /\\S+\\s+\\S+/;"
},
{
"code": null,
"e": 13785,
"s": 13493,
"text": "However, the benefit of grouping is that it allows us to extract a sequence from a regular expression. Groupings are returned as a list in the order in which they appear in the original. For example, in the following fragment we have pulled out the hours, minutes, and seconds from a string."
},
{
"code": null,
"e": 13852,
"s": 13785,
"text": "my ($hours, $minutes, $seconds) = ($time =~ m/(\\d+):(\\d+):(\\d+)/);"
},
{
"code": null,
"e": 14078,
"s": 13852,
"text": "As well as this direct method, matched groups are also available within the special $x variables, where x is the number of the group within the regular expression. We could therefore rewrite the preceding example as follows β"
},
{
"code": null,
"e": 14259,
"s": 14078,
"text": "#!/usr/bin/perl\n\n$time = \"12:05:30\";\n\n$time =~ m/(\\d+):(\\d+):(\\d+)/;\nmy ($hours, $minutes, $seconds) = ($1, $2, $3);\n\nprint \"Hours : $hours, Minutes: $minutes, Second: $seconds\\n\";"
},
{
"code": null,
"e": 14326,
"s": 14259,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 14363,
"s": 14326,
"text": "Hours : 12, Minutes: 05, Second: 30\n"
},
{
"code": null,
"e": 14515,
"s": 14363,
"text": "When groups are used in substitution expressions, the $x syntax can be used in the replacement text. Thus, we could reformat a date string using this β"
},
{
"code": null,
"e": 14612,
"s": 14515,
"text": "#!/usr/bin/perl\n\n$date = '03/26/1999';\n$date =~ s#(\\d+)/(\\d+)/(\\d+)#$3/$1/$2#;\n\nprint \"$date\\n\";"
},
{
"code": null,
"e": 14679,
"s": 14612,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 14691,
"s": 14679,
"text": "1999/03/26\n"
},
{
"code": null,
"e": 14989,
"s": 14691,
"text": "The \\G assertion allows you to continue searching from the point where the last match occurred. For example, in the following code, we have used \\G so that we can search to the correct position and then extract some information, without having to create a more complex, single regular expression β"
},
{
"code": null,
"e": 15216,
"s": 14989,
"text": "#!/usr/bin/perl\n\n$string = \"The time is: 12:31:02 on 4/12/00\";\n\n$string =~ /:\\s+/g;\n($time) = ($string =~ /\\G(\\d+:\\d+:\\d+)/);\n$string =~ /.+\\s+/g;\n($date) = ($string =~ m{\\G(\\d+/\\d+/\\d+)});\n\nprint \"Time: $time, Date: $date\\n\";"
},
{
"code": null,
"e": 15283,
"s": 15216,
"text": "When above program is executed, it produces the following result β"
},
{
"code": null,
"e": 15314,
"s": 15283,
"text": "Time: 12:31:02, Date: 4/12/00\n"
},
{
"code": null,
"e": 15550,
"s": 15314,
"text": "The \\G assertion is actually just the metasymbol equivalent of the pos function, so between regular expression calls you can continue to use pos, and even modify the value of pos (and therefore \\G) by using pos as an lvalue subroutine."
},
{
"code": null,
"e": 15555,
"s": 15550,
"text": "Perl"
},
{
"code": null,
"e": 15569,
"s": 15555,
"text": "Match \"Perl\"."
},
{
"code": null,
"e": 15579,
"s": 15569,
"text": "[Pp]ython"
},
{
"code": null,
"e": 15608,
"s": 15579,
"text": "Matches \"Python\" or \"python\""
},
{
"code": null,
"e": 15616,
"s": 15608,
"text": "rub[ye]"
},
{
"code": null,
"e": 15641,
"s": 15616,
"text": "Matches \"ruby\" or \"rube\""
},
{
"code": null,
"e": 15649,
"s": 15641,
"text": "[aeiou]"
},
{
"code": null,
"e": 15681,
"s": 15649,
"text": "Matches any one lowercase vowel"
},
{
"code": null,
"e": 15687,
"s": 15681,
"text": "[0-9]"
},
{
"code": null,
"e": 15727,
"s": 15687,
"text": "Matches any digit; same as [0123456789]"
},
{
"code": null,
"e": 15733,
"s": 15727,
"text": "[a-z]"
},
{
"code": null,
"e": 15768,
"s": 15733,
"text": "Matches any lowercase ASCII letter"
},
{
"code": null,
"e": 15774,
"s": 15768,
"text": "[A-Z]"
},
{
"code": null,
"e": 15809,
"s": 15774,
"text": "Matches any uppercase ASCII letter"
},
{
"code": null,
"e": 15821,
"s": 15809,
"text": "[a-zA-Z0-9]"
},
{
"code": null,
"e": 15846,
"s": 15821,
"text": "Matches any of the above"
},
{
"code": null,
"e": 15855,
"s": 15846,
"text": "[^aeiou]"
},
{
"code": null,
"e": 15901,
"s": 15855,
"text": "Matches anything other than a lowercase vowel"
},
{
"code": null,
"e": 15908,
"s": 15901,
"text": "[^0-9]"
},
{
"code": null,
"e": 15944,
"s": 15908,
"text": "Matches anything other than a digit"
},
{
"code": null,
"e": 15946,
"s": 15944,
"text": "."
},
{
"code": null,
"e": 15983,
"s": 15946,
"text": "Matches any character except newline"
},
{
"code": null,
"e": 15986,
"s": 15983,
"text": "\\d"
},
{
"code": null,
"e": 16009,
"s": 15986,
"text": "Matches a digit: [0-9]"
},
{
"code": null,
"e": 16012,
"s": 16009,
"text": "\\D"
},
{
"code": null,
"e": 16039,
"s": 16012,
"text": "Matches a nondigit: [^0-9]"
},
{
"code": null,
"e": 16042,
"s": 16039,
"text": "\\s"
},
{
"code": null,
"e": 16086,
"s": 16042,
"text": "Matches a whitespace character: [ \\t\\r\\n\\f]"
},
{
"code": null,
"e": 16089,
"s": 16086,
"text": "\\S"
},
{
"code": null,
"e": 16125,
"s": 16089,
"text": "Matches nonwhitespace: [^ \\t\\r\\n\\f]"
},
{
"code": null,
"e": 16128,
"s": 16125,
"text": "\\w"
},
{
"code": null,
"e": 16174,
"s": 16128,
"text": "Matches a single word character: [A-Za-z0-9_]"
},
{
"code": null,
"e": 16177,
"s": 16174,
"text": "\\W"
},
{
"code": null,
"e": 16220,
"s": 16177,
"text": "Matches a nonword character: [^A-Za-z0-9_]"
},
{
"code": null,
"e": 16226,
"s": 16220,
"text": "ruby?"
},
{
"code": null,
"e": 16269,
"s": 16226,
"text": "Matches \"rub\" or \"ruby\": the y is optional"
},
{
"code": null,
"e": 16275,
"s": 16269,
"text": "ruby*"
},
{
"code": null,
"e": 16307,
"s": 16275,
"text": "Matches \"rub\" plus 0 or more ys"
},
{
"code": null,
"e": 16313,
"s": 16307,
"text": "ruby+"
},
{
"code": null,
"e": 16345,
"s": 16313,
"text": "Matches \"rub\" plus 1 or more ys"
},
{
"code": null,
"e": 16351,
"s": 16345,
"text": "\\d{3}"
},
{
"code": null,
"e": 16376,
"s": 16351,
"text": "Matches exactly 3 digits"
},
{
"code": null,
"e": 16383,
"s": 16376,
"text": "\\d{3,}"
},
{
"code": null,
"e": 16408,
"s": 16383,
"text": "Matches 3 or more digits"
},
{
"code": null,
"e": 16416,
"s": 16408,
"text": "\\d{3,5}"
},
{
"code": null,
"e": 16442,
"s": 16416,
"text": "Matches 3, 4, or 5 digits"
},
{
"code": null,
"e": 16492,
"s": 16442,
"text": "This matches the smallest number of repetitions β"
},
{
"code": null,
"e": 16497,
"s": 16492,
"text": "<.*>"
},
{
"code": null,
"e": 16540,
"s": 16497,
"text": "Greedy repetition: matches \"<python>perl>\""
},
{
"code": null,
"e": 16546,
"s": 16540,
"text": "<.*?>"
},
{
"code": null,
"e": 16595,
"s": 16546,
"text": "Nongreedy: matches \"<python>\" in \"<python>perl>\""
},
{
"code": null,
"e": 16601,
"s": 16595,
"text": "\\D\\d+"
},
{
"code": null,
"e": 16624,
"s": 16601,
"text": "No group: + repeats \\d"
},
{
"code": null,
"e": 16632,
"s": 16624,
"text": "(\\D\\d)+"
},
{
"code": null,
"e": 16661,
"s": 16632,
"text": "Grouped: + repeats \\D\\d pair"
},
{
"code": null,
"e": 16679,
"s": 16661,
"text": "([Pp]ython(, )?)+"
},
{
"code": null,
"e": 16726,
"s": 16679,
"text": "Match \"Python\", \"Python, python, python\", etc."
},
{
"code": null,
"e": 16774,
"s": 16726,
"text": "This matches a previously matched group again β"
},
{
"code": null,
"e": 16793,
"s": 16774,
"text": "([Pp])ython&\\1ails"
},
{
"code": null,
"e": 16830,
"s": 16793,
"text": "Matches python&pails or Python&Pails"
},
{
"code": null,
"e": 16845,
"s": 16830,
"text": "(['\"])[^\\1]*\\1"
},
{
"code": null,
"e": 16968,
"s": 16845,
"text": "Single or double-quoted string. \\1 matches whatever the 1st group matched. \\2 matches whatever the 2nd group matched, etc."
},
{
"code": null,
"e": 16980,
"s": 16968,
"text": "python|perl"
},
{
"code": null,
"e": 17007,
"s": 16980,
"text": "Matches \"python\" or \"perl\""
},
{
"code": null,
"e": 17018,
"s": 17007,
"text": "rub(y|le))"
},
{
"code": null,
"e": 17044,
"s": 17018,
"text": "Matches \"ruby\" or \"ruble\""
},
{
"code": null,
"e": 17058,
"s": 17044,
"text": "Python(!+|\\?)"
},
{
"code": null,
"e": 17102,
"s": 17058,
"text": "\"Python\" followed by one or more ! or one ?"
},
{
"code": null,
"e": 17140,
"s": 17102,
"text": "This need to specify match positions."
},
{
"code": null,
"e": 17148,
"s": 17140,
"text": "^Python"
},
{
"code": null,
"e": 17207,
"s": 17148,
"text": "Matches \"Python\" at the start of a string or internal line"
},
{
"code": null,
"e": 17215,
"s": 17207,
"text": "Python$"
},
{
"code": null,
"e": 17263,
"s": 17215,
"text": "Matches \"Python\" at the end of a string or line"
},
{
"code": null,
"e": 17272,
"s": 17263,
"text": "\\APython"
},
{
"code": null,
"e": 17314,
"s": 17272,
"text": "Matches \"Python\" at the start of a string"
},
{
"code": null,
"e": 17323,
"s": 17314,
"text": "Python\\Z"
},
{
"code": null,
"e": 17363,
"s": 17323,
"text": "Matches \"Python\" at the end of a string"
},
{
"code": null,
"e": 17374,
"s": 17363,
"text": "\\bPython\\b"
},
{
"code": null,
"e": 17410,
"s": 17374,
"text": "Matches \"Python\" at a word boundary"
},
{
"code": null,
"e": 17418,
"s": 17410,
"text": "\\brub\\B"
},
{
"code": null,
"e": 17489,
"s": 17418,
"text": "\\B is nonword boundary: match \"rub\" in \"rube\" and \"ruby\" but not alone"
},
{
"code": null,
"e": 17501,
"s": 17489,
"text": "Python(?=!)"
},
{
"code": null,
"e": 17555,
"s": 17501,
"text": "Matches \"Python\", if followed by an exclamation point"
},
{
"code": null,
"e": 17567,
"s": 17555,
"text": "Python(?!!)"
},
{
"code": null,
"e": 17625,
"s": 17567,
"text": "Matches \"Python\", if not followed by an exclamation point"
},
{
"code": null,
"e": 17638,
"s": 17625,
"text": "R(?#comment)"
},
{
"code": null,
"e": 17677,
"s": 17638,
"text": "Matches \"R\". All the rest is a comment"
},
{
"code": null,
"e": 17686,
"s": 17677,
"text": "R(?i)uby"
},
{
"code": null,
"e": 17724,
"s": 17686,
"text": "Case-insensitive while matching \"uby\""
},
{
"code": null,
"e": 17734,
"s": 17724,
"text": "R(?i:uby)"
},
{
"code": null,
"e": 17748,
"s": 17734,
"text": "Same as above"
},
{
"code": null,
"e": 17761,
"s": 17748,
"text": "rub(?:y|le))"
},
{
"code": null,
"e": 17806,
"s": 17761,
"text": "Group only without creating \\1 backreference"
},
{
"code": null,
"e": 17841,
"s": 17806,
"text": "\n 46 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 17855,
"s": 17841,
"text": " Devi Killada"
},
{
"code": null,
"e": 17890,
"s": 17855,
"text": "\n 11 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 17910,
"s": 17890,
"text": " Harshit Srivastava"
},
{
"code": null,
"e": 17943,
"s": 17910,
"text": "\n 30 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 17959,
"s": 17943,
"text": " TELCOMA Global"
},
{
"code": null,
"e": 17992,
"s": 17959,
"text": "\n 24 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 18009,
"s": 17992,
"text": " Mohammad Nauman"
},
{
"code": null,
"e": 18042,
"s": 18009,
"text": "\n 68 Lectures \n 7 hours \n"
},
{
"code": null,
"e": 18065,
"s": 18042,
"text": " Stone River ELearning"
},
{
"code": null,
"e": 18100,
"s": 18065,
"text": "\n 58 Lectures \n 6.5 hours \n"
},
{
"code": null,
"e": 18123,
"s": 18100,
"text": " Stone River ELearning"
},
{
"code": null,
"e": 18130,
"s": 18123,
"text": " Print"
},
{
"code": null,
"e": 18141,
"s": 18130,
"text": " Add Notes"
}
] |
Rust - Bitwise Operators
|
Assume variable A = 2 and B = 3.
fn main() {
let a:i32 = 2; // Bit presentation 10
let b:i32 = 3; // Bit presentation 11
let mut result:i32;
result = a & b;
println!("(a & b) => {} ",result);
result = a | b;
println!("(a | b) => {} ",result) ;
result = a ^ b;
println!("(a ^ b) => {} ",result);
result = !b;
println!("(!b) => {} ",result);
result = a << b;
println!("(a << b) => {}",result);
result = a >> b;
println!("(a >> b) => {}",result);
}
(a & b) => 2
(a | b) => 3
(a ^ b) => 1
(!b) => -4
(a << b) => 16
(a >> b) => 0
45 Lectures
4.5 hours
Stone River ELearning
10 Lectures
33 mins
Ken Burke
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2120,
"s": 2087,
"text": "Assume variable A = 2 and B = 3."
},
{
"code": null,
"e": 2593,
"s": 2120,
"text": "fn main() {\n let a:i32 = 2; // Bit presentation 10\n let b:i32 = 3; // Bit presentation 11\n\n let mut result:i32;\n\n result = a & b;\n println!(\"(a & b) => {} \",result);\n\n result = a | b;\n println!(\"(a | b) => {} \",result) ;\n\n result = a ^ b;\n println!(\"(a ^ b) => {} \",result);\n\n result = !b;\n println!(\"(!b) => {} \",result);\n\n result = a << b;\n println!(\"(a << b) => {}\",result);\n\n result = a >> b;\n println!(\"(a >> b) => {}\",result);\n}"
},
{
"code": null,
"e": 2673,
"s": 2593,
"text": "(a & b) => 2\n(a | b) => 3\n(a ^ b) => 1\n(!b) => -4\n(a << b) => 16\n(a >> b) => 0\n"
},
{
"code": null,
"e": 2708,
"s": 2673,
"text": "\n 45 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 2731,
"s": 2708,
"text": " Stone River ELearning"
},
{
"code": null,
"e": 2763,
"s": 2731,
"text": "\n 10 Lectures \n 33 mins\n"
},
{
"code": null,
"e": 2774,
"s": 2763,
"text": " Ken Burke"
},
{
"code": null,
"e": 2781,
"s": 2774,
"text": " Print"
},
{
"code": null,
"e": 2792,
"s": 2781,
"text": " Add Notes"
}
] |
Changing the contrast and brightness of an image using Python - OpenCV - GeeksforGeeks
|
12 Dec, 2021
Changing the Brightness and Contrast level of any image is the most basic thing everyone does with an image. It is meant to change the value of each and every pixel of an image it can be done by either multiplying or dividing the pixels value of an image. In this article, we will see how we can implement our theory in a beautiful code using OpenCV Python,
Before starting letβs try to understand some basic concepts like, what is brightness? What is a contrast? What are pixels? And what is OpenCV?
Brightness: When the brightness is adjusted, the entire range of tones within the image is raised or lowered accordingly.
Contrast: When the contrast adjustment is raised, the middle tones are eliminated. The image will have a higher percentage of darks or blacks and whites or highlights with minimal mid-tone.
Pixels: Pixels are typically used to refer to the display resolution of a computer monitor or screen. The greater the pixels, the greater the detail in the image.
OpenCV: OpenCV is the huge open-source library for computer vision, machine learning, and image processing and now it plays a major role in real-time operation
Agenda: To learn how to adjust the brightness and contrast level of an image using OpenCV.
Requirement: OpenCV
Installation:
pip install openCV
Approach:
Import required module.
Define the main function, Define required data in it.
Create a function brightness_contrast, to create a track bar to adjust brightness and contrast.
Create another function to change the brightness and contrast.
Display the original and edited image.
Terminate the program with βESCβ or simply close the window.
Letβs implement this step-wise:
Step 1: Here we will load an image and create a trackbar.
Syntax: imread(filename): filename(Name of the image file).
namedWindow(winname): winname(Name of the window).
Code:
Python3
if __name__ == '__main__': # The function imread loads an # image from the specified file and returns it. original = cv2.imread("pic.jpeg") # Making another copy of an image. img = original.copy() # The function namedWindow creates # a window that can be used as # a placeholder for images. cv2.namedWindow('GEEK') # The function imshow displays # an image in the specified window. cv2.imshow('GEEK', original) # createTrackbar(trackbarName, # windowName, value, count, onChange) # Brightness range -255 to 255 cv2.createTrackbar('Brightness', 'GEEK', 255, 2 * 255, BrightnessContrast) # Contrast range -127 to 127 cv2.createTrackbar('Contrast', 'GEEK', 127, 2 * 127, BrightnessContrast) BrightnessContrast(0) # The function waitKey waits for# a key event infinitely or for# delay milliseconds, when it is positive.cv2.waitKey(0)
Step 2: By calling the controller function, it will return the edited image, After that imshow() function will display the affected image.
Syntax: getTrackbarPos(trackbarname, winname): trackbarname(Name of the trackbar), winname( Name of the window)
Code:
Python3
def BrightnessContrast(brightness=0): # getTrackbarPos returns the # current position of the specified trackbar. brightness = cv2.getTrackbarPos('Brightness', 'GEEK') contrast = cv2.getTrackbarPos('Contrast', 'GEEK') effect = controller(img, brightness, contrast) # The function imshow displays # an image in the specified window cv2.imshow('Effect', effect)
Step 3: The controller function will control the Brightness and Contrast of an image according to the trackbar position and return the edited image.
Syntax: addWeighted(src1, alpha, src2, beta, gamma)
Parameters:
src1: first input array.alpha: (weight of the first array elements.src2: second input array of the same size and channel number as src1.beta: weight of the second array elements.gamma: scalar added to each sum.
putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin)
img: Image.text: Text string to be drawn.org: Bottom-left corner of the text string in the image.fontFace: Font type, see #HersheyFonts.fontScale: Font scale factor that is multiplied by the font-specific base size.color: Text color.thickness: Thickness of the lines used to draw a text.lineType: Line type. See #LineTypes.bottomLeftOrigin: When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.
Python3
def controller(img, brightness=255, contrast=127): brightness = int((brightness - 0) * (255 - (-255)) / (510 - 0) + (-255)) contrast = int((contrast - 0) * (127 - (-127)) / (254 - 0) + (-127)) if brightness != 0: if brightness > 0: shadow = brightness max = 255 else: shadow = 0 max = 255 + brightness al_pha = (max - shadow) / 255 ga_mma = shadow # The function addWeighted # calculates the weighted sum # of two arrays cal = cv2.addWeighted(img, al_pha, img, 0, ga_mma) else: cal = img if contrast != 0: Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast)) Gamma = 127 * (1 - Alpha) # The function addWeighted calculates # the weighted sum of two arrays cal = cv2.addWeighted(cal, Alpha, cal, 0, Gamma) # putText renders the specified # text string in the image. cv2.putText(cal, 'B:{},C:{}'.format(brightness, contrast), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) return cal
Below is the full Implementation:
Python3
import cv2 def BrightnessContrast(brightness=0): # getTrackbarPos returns the current # position of the specified trackbar. brightness = cv2.getTrackbarPos('Brightness', 'GEEK') contrast = cv2.getTrackbarPos('Contrast', 'GEEK') effect = controller(img, brightness, contrast) # The function imshow displays an image # in the specified window cv2.imshow('Effect', effect) def controller(img, brightness=255, contrast=127): brightness = int((brightness - 0) * (255 - (-255)) / (510 - 0) + (-255)) contrast = int((contrast - 0) * (127 - (-127)) / (254 - 0) + (-127)) if brightness != 0: if brightness > 0: shadow = brightness max = 255 else: shadow = 0 max = 255 + brightness al_pha = (max - shadow) / 255 ga_mma = shadow # The function addWeighted calculates # the weighted sum of two arrays cal = cv2.addWeighted(img, al_pha, img, 0, ga_mma) else: cal = img if contrast != 0: Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast)) Gamma = 127 * (1 - Alpha) # The function addWeighted calculates # the weighted sum of two arrays cal = cv2.addWeighted(cal, Alpha, cal, 0, Gamma) # putText renders the specified text string in the image. cv2.putText(cal, 'B:{},C:{}'.format(brightness, contrast), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) return cal if __name__ == '__main__': # The function imread loads an image # from the specified file and returns it. original = cv2.imread("pic.jpeg") # Making another copy of an image. img = original.copy() # The function namedWindow creates a # window that can be used as a placeholder # for images. cv2.namedWindow('GEEK') # The function imshow displays an # image in the specified window. 
cv2.imshow('GEEK', original) # createTrackbar(trackbarName, # windowName, value, count, onChange) # Brightness range -255 to 255 cv2.createTrackbar('Brightness', 'GEEK', 255, 2 * 255, BrightnessContrast) # Contrast range -127 to 127 cv2.createTrackbar('Contrast', 'GEEK', 127, 2 * 127, BrightnessContrast) BrightnessContrast(0) # The function waitKey waits for# a key event infinitely or for delay# milliseconds, when it is positive.cv2.waitKey(0)
Output:
sumitgumber28
Python-OpenCV
Technical Scripter 2020
Python
Technical Scripter
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
How to Install PIP on Windows ?
How to drop one or multiple columns in Pandas Dataframe
How To Convert Python Dictionary To JSON?
Check if element exists in list in Python
Python | Pandas dataframe.groupby()
Defaultdict in Python
Python | Get unique values from a list
Python Classes and Objects
Python | os.path.join() method
Create a directory in Python
|
[
{
"code": null,
"e": 23901,
"s": 23873,
"text": "\n12 Dec, 2021"
},
{
"code": null,
"e": 24260,
"s": 23901,
"text": "Changing the Brightness and Contrast level of any image is the most basic thing everyone does with an image. It is meant to change the value of each and every pixel of an image it can be done by either multiplying or dividing the pixels value of an image. In this article, we will see how we can implement our theory in a beautiful code using OpenCV Python, "
},
{
"code": null,
"e": 24403,
"s": 24260,
"text": "Before starting letβs try to understand some basic concepts like, what is brightness? What is a contrast? What are pixels? And what is OpenCV?"
},
{
"code": null,
"e": 24525,
"s": 24403,
"text": "Brightness: When the brightness is adjusted, the entire range of tones within the image is raised or lowered accordingly."
},
{
"code": null,
"e": 24715,
"s": 24525,
"text": "Contrast: When the contrast adjustment is raised, the middle tones are eliminated. The image will have a higher percentage of darks or blacks and whites or highlights with minimal mid-tone."
},
{
"code": null,
"e": 24878,
"s": 24715,
"text": "Pixels: Pixels are typically used to refer to the display resolution of a computer monitor or screen. The greater the pixels, the greater the detail in the image."
},
{
"code": null,
"e": 25038,
"s": 24878,
"text": "OpenCV: OpenCV is the huge open-source library for computer vision, machine learning, and image processing and now it plays a major role in real-time operation"
},
{
"code": null,
"e": 25129,
"s": 25038,
"text": "Agenda: To learn how to adjust the brightness and contrast level of an image using OpenCV."
},
{
"code": null,
"e": 25149,
"s": 25129,
"text": "Requirement: OpenCV"
},
{
"code": null,
"e": 25163,
"s": 25149,
"text": "Installation:"
},
{
"code": null,
"e": 25182,
"s": 25163,
"text": "pip install openCV"
},
{
"code": null,
"e": 25193,
"s": 25182,
"text": "Approach: "
},
{
"code": null,
"e": 25217,
"s": 25193,
"text": "Import required module."
},
{
"code": null,
"e": 25271,
"s": 25217,
"text": "Define the main function, Define required data in it."
},
{
"code": null,
"e": 25367,
"s": 25271,
"text": "Create a function brightness_contrast, to create a track bar to adjust brightness and contrast."
},
{
"code": null,
"e": 25430,
"s": 25367,
"text": "Create another function to change the brightness and contrast."
},
{
"code": null,
"e": 25469,
"s": 25430,
"text": "Display the original and edited image."
},
{
"code": null,
"e": 25530,
"s": 25469,
"text": "Terminate the program with βESCβ or simply close the window."
},
{
"code": null,
"e": 25562,
"s": 25530,
"text": "Letβs implement this step-wise:"
},
{
"code": null,
"e": 25621,
"s": 25562,
"text": "Step 1: Here we will load an image and create a trackbar. "
},
{
"code": null,
"e": 25682,
"s": 25621,
"text": "Syntax: imread(filename): filename(Name of the image file)."
},
{
"code": null,
"e": 25734,
"s": 25682,
"text": "namedWindow(winname): winname(Name of the window)."
},
{
"code": null,
"e": 25740,
"s": 25734,
"text": "Code:"
},
{
"code": null,
"e": 25748,
"s": 25740,
"text": "Python3"
},
{
"code": "if __name__ == '__main__': # The function imread loads an # image from the specified file and returns it. original = cv2.imread(\"pic.jpeg\") # Making another copy of an image. img = original.copy() # The function namedWindow creates # a window that can be used as # a placeholder for images. cv2.namedWindow('GEEK') # The function imshow displays # an image in the specified window. cv2.imshow('GEEK', original) # createTrackbar(trackbarName, # windowName, value, count, onChange) # Brightness range -255 to 255 cv2.createTrackbar('Brightness', 'GEEK', 255, 2 * 255, BrightnessContrast) # Contrast range -127 to 127 cv2.createTrackbar('Contrast', 'GEEK', 127, 2 * 127, BrightnessContrast) BrightnessContrast(0) # The function waitKey waits for# a key event infinitely or for# delay milliseconds, when it is positive.cv2.waitKey(0)",
"e": 26739,
"s": 25748,
"text": null
},
{
"code": null,
"e": 26878,
"s": 26739,
"text": "Step 2: By calling the controller function, it will return the edited image, After that imshow() function will display the affected image."
},
{
"code": null,
"e": 26990,
"s": 26878,
"text": "Syntax: getTrackbarPos(trackbarname, winname): trackbarname(Name of the trackbar), winname( Name of the window)"
},
{
"code": null,
"e": 26996,
"s": 26990,
"text": "Code:"
},
{
"code": null,
"e": 27004,
"s": 26996,
"text": "Python3"
},
{
"code": "def BrightnessContrast(brightness=0): # getTrackbarPos returns the # current position of the specified trackbar. brightness = cv2.getTrackbarPos('Brightness', 'GEEK') contrast = cv2.getTrackbarPos('Contrast', 'GEEK') effect = controller(img, brightness, contrast) # The function imshow displays # an image in the specified window cv2.imshow('Effect', effect)",
"e": 27513,
"s": 27004,
"text": null
},
{
"code": null,
"e": 27663,
"s": 27513,
"text": "Step 3: The controller function will control the Brightness and Contrast of an image according to the trackbar position and return the edited image. "
},
{
"code": null,
"e": 27715,
"s": 27663,
"text": "Syntax: addWeighted(src1, alpha, src2, beta, gamma)"
},
{
"code": null,
"e": 27727,
"s": 27715,
"text": "Parameters:"
},
{
"code": null,
"e": 27938,
"s": 27727,
"text": "src1: first input array.alpha: (weight of the first array elements.src2: second input array of the same size and channel number as src1.beta: weight of the second array elements.gamma: scalar added to each sum."
},
{
"code": null,
"e": 28029,
"s": 27938,
"text": "putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin)"
},
{
"code": null,
"e": 28474,
"s": 28029,
"text": "img: Image.text: Text string to be drawn.org: Bottom-left corner of the text string in the image.fontFace: Font type, see #HersheyFonts.fontScale: Font scale factor that is multiplied by the font-specific base size.color: Text color.thickness: Thickness of the lines used to draw a text.lineType: Line type. See #LineTypes.bottomLeftOrigin: When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner."
},
{
"code": null,
"e": 28482,
"s": 28474,
"text": "Python3"
},
{
"code": "def controller(img, brightness=255, contrast=127): brightness = int((brightness - 0) * (255 - (-255)) / (510 - 0) + (-255)) contrast = int((contrast - 0) * (127 - (-127)) / (254 - 0) + (-127)) if brightness != 0: if brightness > 0: shadow = brightness max = 255 else: shadow = 0 max = 255 + brightness al_pha = (max - shadow) / 255 ga_mma = shadow # The function addWeighted # calculates the weighted sum # of two arrays cal = cv2.addWeighted(img, al_pha, img, 0, ga_mma) else: cal = img if contrast != 0: Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast)) Gamma = 127 * (1 - Alpha) # The function addWeighted calculates # the weighted sum of two arrays cal = cv2.addWeighted(cal, Alpha, cal, 0, Gamma) # putText renders the specified # text string in the image. cv2.putText(cal, 'B:{},C:{}'.format(brightness, contrast), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) return cal",
"e": 29694,
"s": 28482,
"text": null
},
{
"code": null,
"e": 29728,
"s": 29694,
"text": "Below is the full Implementation:"
},
{
"code": null,
"e": 29736,
"s": 29728,
"text": "Python3"
},
{
"code": "import cv2 def BrightnessContrast(brightness=0): # getTrackbarPos returns the current # position of the specified trackbar. brightness = cv2.getTrackbarPos('Brightness', 'GEEK') contrast = cv2.getTrackbarPos('Contrast', 'GEEK') effect = controller(img, brightness, contrast) # The function imshow displays an image # in the specified window cv2.imshow('Effect', effect) def controller(img, brightness=255, contrast=127): brightness = int((brightness - 0) * (255 - (-255)) / (510 - 0) + (-255)) contrast = int((contrast - 0) * (127 - (-127)) / (254 - 0) + (-127)) if brightness != 0: if brightness > 0: shadow = brightness max = 255 else: shadow = 0 max = 255 + brightness al_pha = (max - shadow) / 255 ga_mma = shadow # The function addWeighted calculates # the weighted sum of two arrays cal = cv2.addWeighted(img, al_pha, img, 0, ga_mma) else: cal = img if contrast != 0: Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast)) Gamma = 127 * (1 - Alpha) # The function addWeighted calculates # the weighted sum of two arrays cal = cv2.addWeighted(cal, Alpha, cal, 0, Gamma) # putText renders the specified text string in the image. cv2.putText(cal, 'B:{},C:{}'.format(brightness, contrast), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) return cal if __name__ == '__main__': # The function imread loads an image # from the specified file and returns it. original = cv2.imread(\"pic.jpeg\") # Making another copy of an image. img = original.copy() # The function namedWindow creates a # window that can be used as a placeholder # for images. cv2.namedWindow('GEEK') # The function imshow displays an # image in the specified window. 
cv2.imshow('GEEK', original) # createTrackbar(trackbarName, # windowName, value, count, onChange) # Brightness range -255 to 255 cv2.createTrackbar('Brightness', 'GEEK', 255, 2 * 255, BrightnessContrast) # Contrast range -127 to 127 cv2.createTrackbar('Contrast', 'GEEK', 127, 2 * 127, BrightnessContrast) BrightnessContrast(0) # The function waitKey waits for# a key event infinitely or for delay# milliseconds, when it is positive.cv2.waitKey(0)",
"e": 32425,
"s": 29736,
"text": null
},
{
"code": null,
"e": 32433,
"s": 32425,
"text": "Output:"
},
{
"code": null,
"e": 32447,
"s": 32433,
"text": "sumitgumber28"
},
{
"code": null,
"e": 32461,
"s": 32447,
"text": "Python-OpenCV"
},
{
"code": null,
"e": 32485,
"s": 32461,
"text": "Technical Scripter 2020"
},
{
"code": null,
"e": 32492,
"s": 32485,
"text": "Python"
},
{
"code": null,
"e": 32511,
"s": 32492,
"text": "Technical Scripter"
},
{
"code": null,
"e": 32609,
"s": 32511,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 32618,
"s": 32609,
"text": "Comments"
},
{
"code": null,
"e": 32631,
"s": 32618,
"text": "Old Comments"
},
{
"code": null,
"e": 32663,
"s": 32631,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 32719,
"s": 32663,
"text": "How to drop one or multiple columns in Pandas Dataframe"
},
{
"code": null,
"e": 32761,
"s": 32719,
"text": "How To Convert Python Dictionary To JSON?"
},
{
"code": null,
"e": 32803,
"s": 32761,
"text": "Check if element exists in list in Python"
},
{
"code": null,
"e": 32839,
"s": 32803,
"text": "Python | Pandas dataframe.groupby()"
},
{
"code": null,
"e": 32861,
"s": 32839,
"text": "Defaultdict in Python"
},
{
"code": null,
"e": 32900,
"s": 32861,
"text": "Python | Get unique values from a list"
},
{
"code": null,
"e": 32927,
"s": 32900,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 32958,
"s": 32927,
"text": "Python | os.path.join() method"
}
] |
PHP 5 vs PHP 7 - GeeksforGeeks
|
09 Mar, 2018
PHP is a server side scripting language designed for web development by Rasmus Lerdorf in 1994. Since its launch in 1994 PHP has become an industry standard supporting almost 80% of the websites ( 79.8% to be precise) with its closest competitor being ASP.Net at 19.8% and others like Ruby, Java trailing far behind.
The PHP development team released the latest version of PHP: PHP 7 claiming it to be twice as fast as its predecessor PHP 5. So, is migrating to PHP 7 really worth it?
Letβs get into some details:
Advantages:
Performance: As per Zend Technologies, the performance improvement is huge!! Just upgrading to PHP 7 gives enormous performance upgrades. Hence, PHP 7 is often termed PHPNG (PHP β Next Gen) taking the performance of your code to a whole new level.Return Type: Developers have been raising their eyebrows over not being able to declare a return type for their function. This has somewhat been taken care of in PHP 7 where you will be able to declare what type of value will be returned. Eg. :
Performance: As per Zend Technologies, the performance improvement is huge!! Just upgrading to PHP 7 gives enormous performance upgrades. Hence, PHP 7 is often termed PHPNG (PHP β Next Gen) taking the performance of your code to a whole new level.
Return Type: Developers have been raising their eyebrows over not being able to declare a return type for their function. This has somewhat been taken care of in PHP 7 where you will be able to declare what type of value will be returned. Eg. :
public function area (float $r) : float
{
return 3.14*$r*$r;
}
Spaceship Operator: As the name suggests, the spaceship operator introduced is certainly from a different world. It can be mostly used in sorting and combined comparison. Example:
Spaceship Operator: As the name suggests, the spaceship operator introduced is certainly from a different world. It can be mostly used in sorting and combined comparison. Example:
Before:
function sort ($a,$b)
{
if ($a>$b)
return 1;
else if ( $a ==$b)
return 0;
else
return -1;
}
In PHP 7:
function sort ($a,$b)
{
return $a <=> $b;
}
Null Coalesce Operator:The coalesce operator (??) returns result of its first operand if it exists, or null if it doesnβt. Eg. :
Null Coalesce Operator:The coalesce operator (??) returns result of its first operand if it exists, or null if it doesnβt. Eg. :
Before:
if (isset($_GET['name']))
{
    $name = $_GET['name'];
}
else
    $name = null;
In PHP 7:
$name = $_GET['name'] ?? null;
Additional Features:
Unicode Codepoint Escape Syntax : PHP 7 introduced syntax to escape Unicode codepoint as below :
echo "\u{202E} Reverse "; // This outputs: esreveR
Deprecation of mysql_* functions: PHP 7 has deprecated all mysql_* functions, now developers have to use mysqli (the intelligent version of MySQL) instead.
Cons:
While there is no major downside to it, but to just point out, here is a list of some:
A lot of extensions are not ready yet for PHP 7.If anyone has functions like βeregβ and βmysqlβ buried deep inside their code base, they are gonna strike a Backward Compatibility wall as these functions are deprecated and, it is going to be a real pain in the behind to upgrade.
A lot of extensions are not ready yet for PHP 7.
If anyone has functions like 'ereg' and 'mysql' buried deep inside their code base, they are going to hit a Backward Compatibility wall, as these functions are deprecated, and upgrading is going to be a real pain.
Conclusion:
PHP 7 is the future of PHP and all the applications will need to upgrade to PHP 7 sooner or later. Like all major revolutions throughout history, the PHP 7 revolution will also be spilling some blood before producing something awesome.
How do you feel about PHP 7 in general? Is it heading in the right direction? Let us know!!
This blog is contributed by Ayusch Jain. If you also wish to showcase your blog here, please see GBlog for guest blog writing on GeeksforGeeks.
PHP-basics
GBlog
PHP
PHP
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
Top 10 Front End Developer Skills That You Need in 2022
DSA Sheet by Love Babbar
6 Best IDE's For Python in 2022
A Freshers Guide To Programming
Types of Software Testing
How to Insert Form Data into Database using PHP ?
How to execute PHP code using command line ?
How to pop an alert message box using PHP ?
PHP in_array() Function
How to convert array to string in PHP ?
|
[
{
"code": null,
"e": 23617,
"s": 23589,
"text": "\n09 Mar, 2018"
},
{
"code": null,
"e": 23934,
"s": 23617,
"text": "PHP is a server side scripting language designed for web development by Rasmus Lerdorf in 1994. Since its launch in 1994 PHP has become an industry standard supporting almost 80% of the websites ( 79.8% to be precise) with its closest competitor being ASP.Net at 19.8% and others like Ruby, Java trailing far behind."
},
{
"code": null,
"e": 24102,
"s": 23934,
"text": "The PHP development team released the latest version of PHP: PHP 7 claiming it to be twice as fast as its predecessor PHP 5. So, is migrating to PHP 7 really worth it?"
},
{
"code": null,
"e": 24131,
"s": 24102,
"text": "Letβs get into some details:"
},
{
"code": null,
"e": 24143,
"s": 24131,
"text": "Advantages:"
},
{
"code": null,
"e": 24635,
"s": 24143,
"text": "Performance: As per Zend Technologies, the performance improvement is huge!! Just upgrading to PHP 7 gives enormous performance upgrades. Hence, PHP 7 is often termed PHPNG (PHP β Next Gen) taking the performance of your code to a whole new level.Return Type: Developers have been raising their eyebrows over not being able to declare a return type for their function. This has somewhat been taken care of in PHP 7 where you will be able to declare what type of value will be returned. Eg. :"
},
{
"code": null,
"e": 24883,
"s": 24635,
"text": "Performance: As per Zend Technologies, the performance improvement is huge!! Just upgrading to PHP 7 gives enormous performance upgrades. Hence, PHP 7 is often termed PHPNG (PHP β Next Gen) taking the performance of your code to a whole new level."
},
{
"code": null,
"e": 25128,
"s": 24883,
"text": "Return Type: Developers have been raising their eyebrows over not being able to declare a return type for their function. This has somewhat been taken care of in PHP 7 where you will be able to declare what type of value will be returned. Eg. :"
},
{
"code": null,
"e": 25211,
"s": 25128,
"text": " public function area (float $r) : float\n {\n return 3.14*$r*$r;\n }\n"
},
{
"code": null,
"e": 25391,
"s": 25211,
"text": "Spaceship Operator: As the name suggests, the spaceship operator introduced is certainly from a different world. It can be mostly used in sorting and combined comparison. Example:"
},
{
"code": null,
"e": 25571,
"s": 25391,
"text": "Spaceship Operator: As the name suggests, the spaceship operator introduced is certainly from a different world. It can be mostly used in sorting and combined comparison. Example:"
},
{
"code": null,
"e": 25579,
"s": 25571,
"text": "Before:"
},
{
"code": null,
"e": 25742,
"s": 25579,
"text": " function sort ($a,$b)\n {\n if ($a>$b)\n return 1;\n else if ( $a ==$b)\n return 0;\n else\n return -1;\n }"
},
{
"code": null,
"e": 25753,
"s": 25742,
"text": " In PHP 7:"
},
{
"code": null,
"e": 25819,
"s": 25753,
"text": " function sort ($a,$b)\n {\n return $a < = > $b;\n }\n"
},
{
"code": null,
"e": 25948,
"s": 25819,
"text": "Null Coalesce Operator:The coalesce operator (??) returns result of its first operand if it exists, or null if it doesnβt. Eg. :"
},
{
"code": null,
"e": 26077,
"s": 25948,
"text": "Null Coalesce Operator:The coalesce operator (??) returns result of its first operand if it exists, or null if it doesnβt. Eg. :"
},
{
"code": null,
"e": 26085,
"s": 26077,
"text": "Before:"
},
{
"code": null,
"e": 26189,
"s": 26085,
"text": " if (isset ($_GET [βnameβ]))\n {\n $name = $_GET [βnameβ];\n }\n else\n $name = null;\n"
},
{
"code": null,
"e": 26199,
"s": 26189,
"text": "In PHP 7:"
},
{
"code": null,
"e": 26231,
"s": 26199,
"text": "$name = $_GET [βnameβ]?? Null;\n"
},
{
"code": null,
"e": 26252,
"s": 26231,
"text": "Additional Features:"
},
{
"code": null,
"e": 26349,
"s": 26252,
"text": "Unicode Codepoint Escape Syntax : PHP 7 introduced syntax to escape Unicode codepoint as below :"
},
{
"code": null,
"e": 26416,
"s": 26349,
"text": " echo β\\u{202E} Reverse β; // This outputs : esreveR"
},
{
"code": null,
"e": 26572,
"s": 26416,
"text": "Deprecation of mysql_* functions: PHP 7 has deprecated all mysql_* functions, now developers have to use mysqli (the intelligent version of MySQL) instead."
},
{
"code": null,
"e": 26579,
"s": 26572,
"text": " Cons:"
},
{
"code": null,
"e": 26666,
"s": 26579,
"text": "While there is no major downside to it, but to just point out, here is a list of some:"
},
{
"code": null,
"e": 26945,
"s": 26666,
"text": "A lot of extensions are not ready yet for PHP 7.If anyone has functions like βeregβ and βmysqlβ buried deep inside their code base, they are gonna strike a Backward Compatibility wall as these functions are deprecated and, it is going to be a real pain in the behind to upgrade."
},
{
"code": null,
"e": 26994,
"s": 26945,
"text": "A lot of extensions are not ready yet for PHP 7."
},
{
"code": null,
"e": 27225,
"s": 26994,
"text": "If anyone has functions like βeregβ and βmysqlβ buried deep inside their code base, they are gonna strike a Backward Compatibility wall as these functions are deprecated and, it is going to be a real pain in the behind to upgrade."
},
{
"code": null,
"e": 27237,
"s": 27225,
"text": "Conclusion:"
},
{
"code": null,
"e": 27473,
"s": 27237,
"text": "PHP 7 is the future of PHP and all the applications will need to upgrade to PHP 7 sooner or later. Like all major revolutions throughout history, the PHP 7 revolution will also be spilling some blood before producing something awesome."
},
{
"code": null,
"e": 27565,
"s": 27473,
"text": "How do you feel about PHP 7 in general? Is it heading in the right direction? Let us know!!"
},
{
"code": null,
"e": 27709,
"s": 27565,
"text": "This blog is contributed by Ayusch Jain. If you also wish to showcase your blog here, please see GBlog for guest blog writing on GeeksforGeeks."
},
{
"code": null,
"e": 27720,
"s": 27709,
"text": "PHP-basics"
},
{
"code": null,
"e": 27726,
"s": 27720,
"text": "GBlog"
},
{
"code": null,
"e": 27730,
"s": 27726,
"text": "PHP"
},
{
"code": null,
"e": 27734,
"s": 27730,
"text": "PHP"
},
{
"code": null,
"e": 27832,
"s": 27734,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27841,
"s": 27832,
"text": "Comments"
},
{
"code": null,
"e": 27854,
"s": 27841,
"text": "Old Comments"
},
{
"code": null,
"e": 27910,
"s": 27854,
"text": "Top 10 Front End Developer Skills That You Need in 2022"
},
{
"code": null,
"e": 27935,
"s": 27910,
"text": "DSA Sheet by Love Babbar"
},
{
"code": null,
"e": 27967,
"s": 27935,
"text": "6 Best IDE's For Python in 2022"
},
{
"code": null,
"e": 27999,
"s": 27967,
"text": "A Freshers Guide To Programming"
},
{
"code": null,
"e": 28025,
"s": 27999,
"text": "Types of Software Testing"
},
{
"code": null,
"e": 28075,
"s": 28025,
"text": "How to Insert Form Data into Database using PHP ?"
},
{
"code": null,
"e": 28120,
"s": 28075,
"text": "How to execute PHP code using command line ?"
},
{
"code": null,
"e": 28164,
"s": 28120,
"text": "How to pop an alert message box using PHP ?"
},
{
"code": null,
"e": 28188,
"s": 28164,
"text": "PHP in_array() Function"
}
] |
Sentiment & Engagement Analysis from your Slack data | by Lilla Szulyovszky | Towards Data Science
|
Ever wondered how engaging was the content you delivered? Was it clear or confusing? Or if people misunderstood your message at that company-wide meeting?
Remote environments give very little chance for teachers and leaders to gain feedback and optimize their content towards better performance.
Since a considerable amount of my career was already done remotely (pre-covid times, actually!), I found these questions sparkling excitement and joy in my brain hungry for creative solutions. I had the data, all I had to do is draft the questions I want to answer and do it.
This was also the final, e2e project of my Bootcamp and I had subject matter experts at hand as stakeholders (my lead teacher and my project mentor) to direct me towards a value-driven product.
My dataset included public Slack conversations from the first day till the last at the Ironhack Bootcamp provided by the Slack admin. Could have been done through the Slack API as well, but that was out of scope for my project.
For the sake of keeping this blog post concise, note that I focused on highlighting the exciting parts of the code, not the insights it gave.
If youβre looking for the:
visuals (done in Tableau) check out my presentation
detailed code, browse my GitHub repo here.
To represent the challenge I had with the number of JSON files, here is just the general channelβs folder containing all the conversations broken down by days, like this:
So I started off by loading each channelβs JSON files into a single dataframe.
# defining file pathpath_to_json = '../raw_data/general/' # get all json files from therejson_pattern = os.path.join(path_to_json,'*.json')file_list = glob.glob(json_pattern)# an empty list to store the data framesdfs = []for file in file_list: # read data frame from json file data = pd.read_json(file) # append the data frame to the list dfs.append(data)# concatenate all the data frames in the listchannel_gen = pd.concat(dfs, ignore_index=True)# testchannel_gen.tail(100)
Then went on to merging each channelβs separate dataframes into a single one for convenience.
#frames = [channel_gen, channel_books, channel_dmemes, channel_dresource, channel_dbootcamp, channel_funcommittee, channel_dvizbeauties, channel_frustrations, channel_finalproject, channel_frustrations, channel_funcommittee, channel_katas, channel_labhelp, channel_music, channel_random, channel_vanilla]df = pd.concat([channel_gen, channel_books, channel_dmemes, channel_dresource, channel_dbootcamp, channel_funcommittee, channel_dvizbeauties, channel_frustrations, channel_finalproject, channel_frustrations, channel_funcommittee, channel_katas, channel_labhelp, channel_music, channel_random, channel_vanilla], ignore_index=True, join="outer")
By this time, my dataframe has 5263 lines and 13 columns with a bunch of unrelated data to my project. Cleaning was tough.
Columns to clean & wrangle:
- subtype: filter out it's values from df, remove the original column\- ts: changing it to datetime, remove miliseconds, get days of the week, months of the year, type of the day, parts of the day\- user_profile: extract real_name in new column, remove the original\- attachments: extract title, text, link in new columns\- files: extract url_private and who shared\- attachments: extract title, text, link in new columns\- reactions: extract user, count, name of the emoji\
Since almost all the data was nested in JSON libraries, the majority of my projectβs time was spent iterating through feature engineering tasks to conjure variables I can use to train models with. At the same time, extracting data is what I enjoyed the most. Below you can see a couple of examples of the functions created to draw insights from the dataframe.
Who sent the most replies:
# user_profile column: extract real_namedef getrealnamefromprofile(x): """this function is applied to column user_profile """ if x != x: return 'noname' else: return x['real_name']df_clean['real_name'] = df_clean['user_profile'].apply(getrealnamefromprofile)df_clean
What kind of emojis were used the most in the cohort:
# reactions column: extract frequencydef getcountfromreactions(x): """this function is applied to column reactions """ if x != x: return 0 else: return x[0]['count']df_clean['reactions_count'] = df_clean['reactions'].apply(getcountfromreactions)df_clean
What links people shared on the channels:
# files column: extract linkdef geturlfromfile(x): """this function is applied to column files """ if x != x: return 'nofile' else: try: return x[0]['url_private'] except KeyError: return 'nolink_infiles'df_clean['link_of_file'] = df_clean['files'].apply(geturlfromfile)df_clean
To help me find the source of the communication, I created another function to differentiate the Lead Teacher and the Teaching Assistants from the students.
# create a new column with teaching and studentsdef applyFunc(s): if s == 'siand the LT (she/her)': return 'teacher' if s == 'Florian Titze': return 'teacher' if s == 'Kosta': return 'teacher' else: return 'student' return ''df_clean['participant'] = df_clean['real_name'].apply(applyFunc)df_clean['participant'].value_counts()
Finally, before getting ready for the models β I took a moment to check my cleaned dataframe with appreciation:
While working through this section, I realized this is what I want to specialize in. Text analytics, sounds so cool, right? Just imagine the amount of time people spend reading cumbersome text and trying to analyze it with a brain full of biases when a machine can do it in milliseconds. Making me shiver.
My scope originally included text feature extraction as well (since thatβs the most valuable thing you can get out of written communication) and this is something Iβm working on right now, however, there was no time for that in those 5 days I had and the topic was out of scope for the Bootcamp as well.
Instead, I focused on getting the sentiment score for each comment and generating an awesome wordcloud from the most frequently used words as a present to my peers. ❤️
def clean_links(df):#replace URL of a text df_sent['text'] = df_sent['text'].str.replace('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ')clean_links(df_sent)df_sent['text']# load VADERsid = SentimentIntensityAnalyzer()# add VADER metrics to dataframedf_sent['scores'] = df_sent['text'].apply(lambda text: sid.polarity_scores(text))df_sent['compound'] = df_sent['scores'].apply(lambda score_dict: score_dict['compound'])df_sent['comp_score'] = df_sent['compound'].apply(lambda c: 'pos' if c >=0 else 'neg')#test df_sent.head()
This was easy. Now, on to the challenging preprocessing part: creating a wordcloud without links, numbers, punctuation, or stopwords:
# set of stopwords to be removed from textstop = set(stopwords.words('english'))# update stopwords to have punctuation toostop.update(list(string.punctuation))def clean_text(text_list): # Remove unwanted html characters re1 = re.compile(r' +') x1 = text_list.lower().replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace( 'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace( '<br />', "\n").replace('\\"', '"').replace('<unk>', 'u_n').replace(' @.@ ', '.').replace( ' @-@ ', '-').replace('\\', ' \\ ') text = re1.sub(' ', html.unescape(x1)) # remove non-ascii characters text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore') # strip html soup = BeautifulSoup(text, 'html.parser') text = soup.get_text() # remove between square brackets text = re.sub('\[[^]]*\]', '', text) # remove URLs text = re.sub(r'http\S+', '', text) # remove twitter tags text = text.replace("@", "") # remove hashtags text = text.replace("#", "") # remove all non-alphabetic characters text = re.sub(r'[^a-zA-Z ]', '', text) # remove stopwords from text final_text = [] for word in text.split(): if word.strip().lower() not in stop: final_text.append(word.strip().lower()) text = " ".join(final_text) # lemmatize words lemmatizer = WordNetLemmatizer() text = " ".join([lemmatizer.lemmatize(word) for word in text.split()]) text = " ".join([lemmatizer.lemmatize(word, pos = 'v') for word in text.split()]) # replace all numbers with "num" text = re.sub("\d", "num", text) return text.lower()# apply cleaning functiondf_train['prep_text'] = df_train['text'].apply(clean_text)df_train['prep_text'].head(5)# apply wordcloud functionmake_wordcloud(df_train['prep_text'])
and the result: (ta-daa)
To highlight something cool here, I took the Random Forest Classification model to see what features you would need to get a reply (and in this case, to get help from the cohort) with an accuracy score of 0.86:
# feature importancefeat_importances = pd.Series(importances, index=X.columns)plt.figure(figsize=(10,10))feat_importances.nlargest(15).plot(kind='barh', color='#FF9B48', width= 0.7)plt.xlabel('Level of importance', fontsize=16)plt.ylabel('Features', fontsize=16)plt.yticks([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14], ['length_of_text', 'neutral_tone', 'positive_tone', 'amount_of_reactions', 'negative_tone', 'may', 'morning','march', 'files_attached', 'teacher_posted', 'evening', 'early_morning', 'labhelp_channel', 'general_channel', 'got_reaction'])plt.title("Top 15 Important Features", fontsize=20)plt.show()
It seems like you have a better chance to get a reply if you: write a lengthy message in a neutral or positive tone, receive a lot of reactions to it, sending it in the morning also helps or if you have a file attached to it.
Some things I learned through this project were that:
working on something you are invested in is a game-changer
having stakeholders by your side is invaluable
iteration is key
functions save you time in the long run
To continue, Iβll take this data set and implement my newly acquired knowledge from Udemyβs NLP course to extract some cool stuff from the comments too.
|
[
{
"code": null,
"e": 202,
"s": 47,
"text": "Ever wondered how engaging was the content you delivered? Was it clear or confusing? Or if people misunderstood your message at that company-wide meeting?"
},
{
"code": null,
"e": 343,
"s": 202,
"text": "Remote environments give very little chance for teachers and leaders to gain feedback and optimize their content towards better performance."
},
{
"code": null,
"e": 619,
"s": 343,
"text": "Since a considerable amount of my career was already done remotely (pre-covid times, actually!), I found these questions sparkling excitement and joy in my brain hungry for creative solutions. I had the data, all I had to do is draft the questions I want to answer and do it."
},
{
"code": null,
"e": 813,
"s": 619,
"text": "This was also the final, e2e project of my Bootcamp and I had subject matter experts at hand as stakeholders (my lead teacher and my project mentor) to direct me towards a value-driven product."
},
{
"code": null,
"e": 1041,
"s": 813,
"text": "My dataset included public Slack conversations from the first day till the last at the Ironhack Bootcamp provided by the Slack admin. Could have been done through the Slack API as well, but that was out of scope for my project."
},
{
"code": null,
"e": 1183,
"s": 1041,
"text": "For the sake of keeping this blog post concise, note that I focused on highlighting the exciting parts of the code, not the insights it gave."
},
{
"code": null,
"e": 1210,
"s": 1183,
"text": "If youβre looking for the:"
},
{
"code": null,
"e": 1262,
"s": 1210,
"text": "visuals (done in Tableau) check out my presentation"
},
{
"code": null,
"e": 1305,
"s": 1262,
"text": "detailed code, browse my GitHub repo here."
},
{
"code": null,
"e": 1476,
"s": 1305,
"text": "To represent the challenge I had with the number of JSON files, here is just the general channelβs folder containing all the conversations broken down by days, like this:"
},
{
"code": null,
"e": 1555,
"s": 1476,
"text": "So I started off by loading each channelβs JSON files into a single dataframe."
},
{
"code": null,
"e": 2043,
"s": 1555,
"text": "# defining file pathpath_to_json = '../raw_data/general/' # get all json files from therejson_pattern = os.path.join(path_to_json,'*.json')file_list = glob.glob(json_pattern)# an empty list to store the data framesdfs = []for file in file_list: # read data frame from json file data = pd.read_json(file) # append the data frame to the list dfs.append(data)# concatenate all the data frames in the listchannel_gen = pd.concat(dfs, ignore_index=True)# testchannel_gen.tail(100)"
},
{
"code": null,
"e": 2137,
"s": 2043,
"text": "Then went on to merging each channelβs separate dataframes into a single one for convenience."
},
{
"code": null,
"e": 2923,
"s": 2137,
"text": "#frames = [channel_gen, channel_books, channel_dmemes, channel_dresource, channel_dbootcamp, channel_funcommittee, channel_dvizbeauties, channel_frustrations, channel_finalproject, channel_frustrations, channel_funcommittee, channel_katas, channel_labhelp, channel_music, channel_random, channel_vanilla]df = pd.concat([channel_gen, channel_books, channel_dmemes, channel_dresource, channel_dbootcamp, channel_funcommittee, channel_dvizbeauties, channel_frustrations, channel_finalproject, channel_frustrations, channel_funcommittee, channel_katas, channel_labhelp, channel_music, channel_random, channel_vanilla], ignore_index=True, join=\"outer\")"
},
{
"code": null,
"e": 3046,
"s": 2923,
"text": "By this time, my dataframe has 5263 lines and 13 columns with a bunch of unrelated data to my project. Cleaning was tough."
},
{
"code": null,
"e": 3074,
"s": 3046,
"text": "Columns to clean & wrangle:"
},
{
"code": null,
"e": 3549,
"s": 3074,
"text": "- subtype: filter out it's values from df, remove the original column\\- ts: changing it to datetime, remove miliseconds, get days of the week, months of the year, type of the day, parts of the day\\- user_profile: extract real_name in new column, remove the original\\- attachments: extract title, text, link in new columns\\- files: extract url_private and who shared\\- attachments: extract title, text, link in new columns\\- reactions: extract user, count, name of the emoji\\"
},
{
"code": null,
"e": 3909,
"s": 3549,
"text": "Since almost all the data was nested in JSON libraries, the majority of my projectβs time was spent iterating through feature engineering tasks to conjure variables I can use to train models with. At the same time, extracting data is what I enjoyed the most. Below you can see a couple of examples of the functions created to draw insights from the dataframe."
},
{
"code": null,
"e": 3936,
"s": 3909,
"text": "Who sent the most replies:"
},
{
"code": null,
"e": 4233,
"s": 3936,
"text": "# user_profile column: extract real_namedef getrealnamefromprofile(x): \"\"\"this function is applied to column user_profile \"\"\" if x != x: return 'noname' else: return x['real_name']df_clean['real_name'] = df_clean['user_profile'].apply(getrealnamefromprofile)df_clean"
},
{
"code": null,
"e": 4287,
"s": 4233,
"text": "What kind of emojis were used the most in the cohort:"
},
{
"code": null,
"e": 4572,
"s": 4287,
"text": "# reactions column: extract frequencydef getcountfromreactions(x): \"\"\"this function is applied to column reactions \"\"\" if x != x: return 0 else: return x[0]['count']df_clean['reactions_count'] = df_clean['reactions'].apply(getcountfromreactions)df_clean"
},
{
"code": null,
"e": 4614,
"s": 4572,
"text": "What links people shared on the channels:"
},
{
"code": null,
"e": 4952,
"s": 4614,
"text": "# files column: extract linkdef geturlfromfile(x): \"\"\"this function is applied to column files \"\"\" if x != x: return 'nofile' else: try: return x[0]['url_private'] except KeyError: return 'nolink_infiles'df_clean['link_of_file'] = df_clean['files'].apply(geturlfromfile)df_clean"
},
{
"code": null,
"e": 5109,
"s": 4952,
"text": "To help me find the source of the communication, I created another function to differentiate the Lead Teacher and the Teaching Assistants from the students."
},
{
"code": null,
"e": 5482,
"s": 5109,
"text": "# create a new column with teaching and studentsdef applyFunc(s): if s == 'siand the LT (she/her)': return 'teacher' if s == 'Florian Titze': return 'teacher' if s == 'Kosta': return 'teacher' else: return 'student' return ''df_clean['participant'] = df_clean['real_name'].apply(applyFunc)df_clean['participant'].value_counts()"
},
{
"code": null,
"e": 5594,
"s": 5482,
"text": "Finally, before getting ready for the models β I took a moment to check my cleaned dataframe with appreciation:"
},
{
"code": null,
"e": 5900,
"s": 5594,
"text": "While working through this section, I realized this is what I want to specialize in. Text analytics, sounds so cool, right? Just imagine the amount of time people spend reading cumbersome text and trying to analyze it with a brain full of biases when a machine can do it in milliseconds. Making me shiver."
},
{
"code": null,
"e": 6204,
"s": 5900,
"text": "My scope originally included text feature extraction as well (since thatβs the most valuable thing you can get out of written communication) and this is something Iβm working on right now, however, there was no time for that in those 5 days I had and the topic was out of scope for the Bootcamp as well."
},
{
"code": null,
"e": 6373,
"s": 6204,
"text": "Instead, I focused on getting the sentiment score for each comment and generating an awesome worldcloud from the most frequently used words as a present to my peers. β€οΈ"
},
{
"code": null,
"e": 6941,
"s": 6373,
"text": "def clean_links(df):#replace URL of a text df_sent['text'] = df_sent['text'].str.replace('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ')clean_links(df_sent)df_sent['text']# load VADERsid = SentimentIntensityAnalyzer()# add VADER metrics to dataframedf_sent['scores'] = df_sent['text'].apply(lambda text: sid.polarity_scores(text))df_sent['compound'] = df_sent['scores'].apply(lambda score_dict: score_dict['compound'])df_sent['comp_score'] = df_sent['compound'].apply(lambda c: 'pos' if c >=0 else 'neg')#test df_sent.head()"
},
{
"code": null,
"e": 7070,
"s": 6941,
"text": "This was easy. Now, to the challenging preprocessing part to create a worldcloud without links, numbers, punctuation, stopwords:"
},
{
"code": null,
"e": 8977,
"s": 7070,
"text": "# set of stopwords to be removed from textstop = set(stopwords.words('english'))# update stopwords to have punctuation toostop.update(list(string.punctuation))def clean_text(text_list): # Remove unwanted html characters re1 = re.compile(r' +') x1 = text_list.lower().replace('#39;', \"'\").replace('amp;', '&').replace('#146;', \"'\").replace( 'nbsp;', ' ').replace('#36;', '$').replace('\\\\n', \"\\n\").replace('quot;', \"'\").replace( '<br />', \"\\n\").replace('\\\\\"', '\"').replace('<unk>', 'u_n').replace(' @.@ ', '.').replace( ' @-@ ', '-').replace('\\\\', ' \\\\ ') text = re1.sub(' ', html.unescape(x1)) # remove non-ascii characters text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore') # strip html soup = BeautifulSoup(text, 'html.parser') text = soup.get_text() # remove between square brackets text = re.sub('\\[[^]]*\\]', '', text) # remove URLs text = re.sub(r'http\\S+', '', text) # remove twitter tags text = text.replace(\"@\", \"\") # remove hashtags text = text.replace(\"#\", \"\") # remove all non-alphabetic characters text = re.sub(r'[^a-zA-Z ]', '', text) # remove stopwords from text final_text = [] for word in text.split(): if word.strip().lower() not in stop: final_text.append(word.strip().lower()) text = \" \".join(final_text) # lemmatize words lemmatizer = WordNetLemmatizer() text = \" \".join([lemmatizer.lemmatize(word) for word in text.split()]) text = \" \".join([lemmatizer.lemmatize(word, pos = 'v') for word in text.split()]) # replace all numbers with \"num\" text = re.sub(\"\\d\", \"num\", text) return text.lower()# apply cleaning functiondf_train['prep_text'] = df_train['text'].apply(clean_text)df_train['prep_text'].head(5)# apply wordcloud functionmake_wordcloud(df_train['prep_text'])"
},
{
"code": null,
"e": 9002,
"s": 8977,
"text": "and the result: (ta-daa)"
},
{
"code": null,
"e": 9213,
"s": 9002,
"text": "To highlight something cool here, I took the Random Forest Classification model to see what features you would need to get a reply (and in this case, to get help from the cohort) with an accuracy score of 0.86:"
},
{
"code": null,
"e": 10023,
"s": 9213,
"text": "# feature importancefeat_importances = pd.Series(importances, index=X.columns)plt.figure(figsize=(10,10))feat_importances.nlargest(15).plot(kind='barh', color='#FF9B48', width= 0.7)plt.xlabel('Level of importance', fontsize=16)plt.ylabel('Features', fontsize=16)plt.yticks([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14], ['length_of_text', 'neutral_tone', 'positive_tone', 'amount_of_reactions', 'negative_tone', 'may', 'morning','march', 'files_attached', 'teacher_posted', 'evening', 'early_morning', 'labhelp_channel', 'general_channel', 'got_reaction'])plt.title(\"Top 15 Important Features\", fontsize=20)plt.show()"
},
{
"code": null,
"e": 10249,
"s": 10023,
"text": "It seems like you have a better chance to get a reply if you: write a lengthy message in a neutral or positive tone, receive a lot of reactions to it, sending it in the morning also helps or if you have a file attached to it."
},
{
"code": null,
"e": 10303,
"s": 10249,
"text": "Some things I learned through this project were that:"
},
{
"code": null,
"e": 10362,
"s": 10303,
"text": "working on something you are invested in is a game-changer"
},
{
"code": null,
"e": 10409,
"s": 10362,
"text": "having stakeholders by your side is invaluable"
},
{
"code": null,
"e": 10426,
"s": 10409,
"text": "iteration is key"
},
{
"code": null,
"e": 10466,
"s": 10426,
"text": "functions save you time in the long run"
}
] |
Generating Trade Signals using Moving Average(MA) Crossover Strategy β A Python implementation | by Pratik Nabriya | Towards Data Science
|
Disclaimer — The trading strategies and related information in this article are for educational purposes only. All investments and trading in the stock market involve risk. Any decisions related to buying/selling of stocks or other financial instruments should only be made after thorough research and after seeking professional assistance if required.
Indicators such as Moving averages(MAs), Bollinger bands, Relative Strength Index(RSI) are mathematical technical analysis tools that traders and investors use to analyze the past and anticipate future price trends and patterns. Where fundamentalists may track economic data, annual reports, or various other measures, quantitative traders and analysts rely on the charts and indicators to help interpret price moves.
The goal when using indicators is to identify trading opportunities. For example, a moving average crossover often signals an upcoming trend change. Applying the moving average crossover strategy to a price chart allows traders to identify areas where the trend changes the direction creating a potential trading opportunity.
Before we begin, you may consider going through below article to get yourself accustomed with some common finance jargons associated with stock market.
medium.com
A moving average, also called a rolling average or running average, is used to analyze the time-series data by calculating a series of averages of the different subsets of the full dataset.
Moving averages are the averages of a series of numeric values. They have a predefined length for the number of values to average and this set of values moves forward as more data is added with time. Given a series of numbers and a fixed subset size, the first element of the moving averages is obtained by taking the average of the initial fixed subset of the number series. Then to obtain subsequent moving averages the subset is βshift forwardβ i.e. exclude the first element of the previous subset and add the element immediately after the previous subset to the new subset keeping the length fixed . Since it involves taking the average of the dataset over time, it is also called a moving mean (MM) or rolling mean.
In the technical analysis of financial data, moving averages(MAs) are among the most widely used trend following indicators that demonstrate the direction of the marketβs trend.
There are many different types of moving averages depending on how the averages are computed. In any time-series data analysis, the most commonly used types of moving averages are β
Simple Moving Average(SMA)
Weighted Moving Average(WMA)
Exponential Moving Average (EMA or EWMA)
The only noteworthy difference between the various moving averages is the weight assigned to data points in the moving average period. Simple moving averages apply equal weight to all data points. Exponential and weighted averages apply more weight to recent data points.
Among these, Simple Moving Averages(SMAs) and Exponential Moving Averages(EMAs) are arguably the most popular technical analysis tool used by the analysts and traders. In this article, weβll focus primarily on the strategies involving SMAs and EMAs.
Simple Moving Average is one of the core technical indicators used by traders and investors for the technical analysis of a stock, index or securities. Simple moving average is calculated by adding the closing price of the last n number of days and then dividing by the number of days(time-period). Before we dive deep, let's first understand the math behind simple averages.
We have studied how to compute average in school and even in our daily life we often come across the notion of it. Letβs say you are watching a game of cricket and a batsman comes for batting. By looking at his previous 5 match scoresβ 60, 75, 55, 80, 50; you can expect him to score roughly around 60β70 runs in todayβs match.
By calculating the average of a batsman from his last 5 matches, you were able to make a crude prediction that heβll score this much runs today. Although, this is a rough estimation and doesnβt guarantee that heβll score exactly same runs, but still the chances are high. Likewise, SMA helps in predicting the future trend and determine whether an asset price will continue or reverse a bull or bear trend. The SMA is usually used to identify trend direction, but it can also be used to generate potential trading signals.
Calculating Simple moving averages β The formula for calculating the SMA is straightforward:
The simple moving average = (sum of an asset's price over the past n periods) / (number of periods)
All elements in the SMA have the same weightage. If the moving average period is 5, then each element in the SMA will have a 20% (1/5) weightage in the SMA.
βn periodsβ can be anything. You can have a 200 day simple moving average, a 100 hour simple moving average, a 5 day simple moving average, a 26 week simple moving average, etc.
Now that we have accustomed ourselves with the basics, letβs jump to the Python implementation.
For this example, I have taken the 2 years of historical data of the Closing Price of UltraTech Cement Limited stock(ULTRACEMCO as registered on NSE) from 1st Feb 2018 to 1st Feb 2020. You may choose your own set of stocks and the time period for the analysis.
Let's begin by extracting the stock price data from Yahoo Finance by using Pandas-datareader API.
Importing necessary libraries β
import numpy as np import pandas as pdimport matplotlib.pyplot as pltimport datetime
Extracting closing price data of UltraTech Cement stock for the aforementioned time-period β
# import packageimport pandas_datareader.data as web# set start and end dates start = datetime.datetime(2018, 2, 1) end = datetime.datetime(2020, 2, 1) # extract the closing price dataultratech_df = web.DataReader(['ULTRACEMCO.NS'], 'yahoo', start = start, end = end)['Close']ultratech_df.columns = {'Close Price'}ultratech_df.head(10)
Note that SMAs are calculated on closing prices and not adjusted close because we want the trade signal to be generated on the price data and not influenced by dividends paid.
Observe the general price variation of the closing price for the given period β
ultratech_df['Close Price'].plot(figsize = (15, 8))plt.grid()plt.ylabel("Price in Rupees")plt.show()
Create new columns in our dataframe for both the long(i.e. 50 days) and short (i.e 20 days) simple moving averages (SMAs) β
# create 20 days simple moving average columnultratech_df[β20_SMAβ] = ultratech_df[βClose Priceβ].rolling(window = 20, min_periods = 1).mean()# create 50 days simple moving average columnultratech_df[β50_SMAβ] = ultratech_df[βClose Priceβ].rolling(window = 50, min_periods = 1).mean()# display first few rowsultratech_df.head()
In Pandas, dataframe.rolling() function provides the feature of rolling window calculations. min_periods parameter specifies the minimum number of observations in window required to have a value (otherwise result is NA).
Now that we have 20-days and 50-days SMAs, next we see how to strategize this information to generate the trade signals.
There are several ways in which stock market analysts and investors can use moving averages to analyse price trends and predict upcoming change of trends. There are vast varieties of the moving average strategies that can be developed using different types of moving averages. In this article, Iβve tried to demonstrate well-known simplistic yet effective momentum strategies β Simple Moving Average Crossover strategy and Exponential Moving Average Crossover strategy.
In the statistics of time-series, and in particular the Stock market technical analysis, a moving-average crossover occurs when on plotting, the two moving averages each based on different time-periods tend to cross. This indicator uses two (or more) moving averages β a faster moving average(short-term) and a slower(long-term) moving average. The faster moving average may be 5-, 10- or 25-day period while the slower moving average can be 50-, 100- or 200-day period. A short term moving average is faster because it only considers prices over short period of time and is thus more reactive to daily price changes. On the other hand, a long-term moving average is deemed slower as it encapsulates prices over a longer period and is more lethargic.
A moving average, as a line by itself, is often overlaid in price charts to indicate price trends. A crossover occurs when a faster moving average (i.e. a shorter period moving average) crosses a slower moving average (i.e. a longer period moving average). In stock trading, this meeting point can be used as a potential indicator to buy or sell an asset.
When the short term moving average crosses above the long term moving average, this indicates a buy signal.
Contrary, when the short term moving average crosses below the long term moving average, it may be a good moment to sell.
Having equipped with the necessary theory, now letβs continue our Python implementation wherein weβll try to incorporate this strategy.
In our existing pandas dataframe, create a new column βSignalβ such that if 20-day SMA is greater than 50-day SMA then set Signal value as 1 else when 50-day SMA is greater than 20-day SMA then set itβs value as 0.
ultratech_df['Signal'] = 0.0ultratech_df['Signal'] = np.where(ultratech_df['20_SMA'] > ultratech_df['50_SMA'], 1.0, 0.0)
From these βSignalβ values, the position orders can be generated to represent trading signals. Crossover happens when the faster moving average and the slower moving average cross, or in other words the βSignalβ changes from 0 to 1 (or 1 to 0). So, to incorporate this information, create a new column βPositionβ which is nothing but a day-to-day difference of the βSignalβ column.
ultratech_df[βPositionβ] = ultratech_df[βSignalβ].diff()# display first few rowsultratech_df.head()
When βPositionβ = 1, it implies that the Signal has changed from 0 to 1 meaning a short-term(faster) moving average has crossed above the long-term(slower) moving average, thereby triggering a buy call.
When βPositionβ = -1, it implies that the Signal has changed from 1 to 0 meaning a short-term(faster) moving average has crossed below the long-term(slower) moving average, thereby triggering a sell call.
Now letβs visualize this using a plot to make it more clear.
plt.figure(figsize = (20,10))# plot close price, short-term and long-term moving averages ultratech_df[βClose Priceβ].plot(color = βkβ, label= βClose Priceβ) ultratech_df[β20_SMAβ].plot(color = βbβ,label = β20-day SMAβ) ultratech_df[β50_SMAβ].plot(color = βgβ, label = β50-day SMAβ)# plot βbuyβ signalsplt.plot(ultratech_df[ultratech_df[βPositionβ] == 1].index, ultratech_df[β20_SMAβ][ultratech_df[βPositionβ] == 1], β^β, markersize = 15, color = βgβ, label = 'buy')# plot βsellβ signalsplt.plot(ultratech_df[ultratech_df[βPositionβ] == -1].index, ultratech_df[β20_SMAβ][ultratech_df[βPositionβ] == -1], βvβ, markersize = 15, color = βrβ, label = 'sell')plt.ylabel('Price in Rupees', fontsize = 15 )plt.xlabel('Date', fontsize = 15 )plt.title('ULTRACEMCO', fontsize = 20)plt.legend()plt.grid()plt.show()
As you can see in the above plot, the blue line represents the faster moving average(20 day SMA), the green line represents the slower moving average(50 day SMA) and the black line represents the actual closing price. If you carefully observe, these moving averages are nothing but the smoothed versions of the actual price, but lagging by certain period of time. The short-term moving average closely resembles the actual price which perfectly makes sense as it takes into consideration more recent prices. In contrast, the long-term moving average has comparatively more lag and loosely resembles the actual price curve.
A signal to buy (as represented by green up-triangle) is triggered when the fast moving average crosses above the slow moving average. This shows a shift in trend i.e. the average price over last 20 days has risen above the average price of past 50 days. Likewise, a signal to sell(as represented by red down-triangle) is triggered when the fast moving average crosses below the slow moving average indicating that the average price in last 20 days has fallen below the average price of the last 50 days.
So far we have discussed the moving average crossover strategy using the simple moving averages(SMAs). It is straightforward to observe that SMA time-series are much less noisy than the original price. However, this comes at a cost β SMA lag the original price, which means that changes in the trend are only seen with a delay of L days. How much is this lag L? For a SMA moving average calculated using M days, the lag is roughly around M/2 days. Thus, if we are using a 50 days SMA, this means we may be late by almost 25 days, which can significantly affect our strategy.
One way to reduce the lag induced by the use of the SMA is to use Exponential Moving Average(EMA). Exponential moving averages give more weight to the most recent periods. This makes them more reliable than SMAs as they are comparatively better representation of the recent performance of the asset. The EMA is calculated as:
EMA [today] = (Ξ± x Price [today] ) + ((1 β Ξ±) x EMA [yesterday] )
Where: Ξ± = 2/(N + 1)N = the length of the window (moving average period)EMA [today] = the current EMA valuePrice [today] = the current closing priceEMA [yesterday] = the previous EMA value
Although the calculation for an EMA looks bit daunting, in practice itβs simple. In fact, itβs easier to calculate than SMA, and besides, the Pandas ewm functionality will do it for you in a single-line of code!
Having understood the basics, letβs try to incorporate EMAs in place of SMAs in our moving average strategy. Weβre going to use the same code as above, with some minor changes.
# set start and end datesstart = datetime.datetime(2018, 2, 1)end = datetime.datetime(2020, 2, 1)# extract the daily closing price dataultratech_df = web.DataReader(['ULTRACEMCO.NS'], 'yahoo', start = start, end = end)['Close']ultratech_df.columns = {'Close Price'}# Create 20 days exponential moving average columnultratech_df['20_EMA'] = ultratech_df['Close Price'].ewm(span = 20, adjust = False).mean()# Create 50 days exponential moving average columnultratech_df['50_EMA'] = ultratech_df['Close Price'].ewm(span = 50, adjust = False).mean()# create a new column 'Signal' such that if 20-day EMA is greater # than 50-day EMA then set Signal as 1 else 0 ultratech_df['Signal'] = 0.0 ultratech_df['Signal'] = np.where(ultratech_df['20_EMA'] > ultratech_df['50_EMA'], 1.0, 0.0)# create a new column 'Position' which is a day-to-day difference of # the 'Signal' columnultratech_df['Position'] = ultratech_df['Signal'].diff()plt.figure(figsize = (20,10))# plot close price, short-term and long-term moving averages ultratech_df['Close Price'].plot(color = 'k', lw = 1, label = 'Close Price') ultratech_df['20_EMA'].plot(color = 'b', lw = 1, label = '20-day EMA') ultratech_df['50_EMA'].plot(color = 'g', lw = 1, label = '50-day EMA')# plot βbuyβ and 'sell' signalsplt.plot(ultratech_df[ultratech_df[βPositionβ] == 1].index, ultratech_df[β20_EMAβ][ultratech_df[βPositionβ] == 1], β^β, markersize = 15, color = βgβ, label = 'buy')plt.plot(ultratech_df[ultratech_df[βPositionβ] == -1].index, ultratech_df[β20_EMAβ][ultratech_df[βPositionβ] == -1], βvβ, markersize = 15, color = βrβ, label = 'sell')plt.ylabel('Price in Rupees', fontsize = 15 )plt.xlabel('Date', fontsize = 15 )plt.title('ULTRACEMCO - EMA Crossover', fontsize = 20)plt.legend()plt.grid()plt.show()
The following extract from John J. Murphyβs work, βTechnical Analysis of the Financial Marketsβ published by the New York Institute of Finance, explains the advantage of the exponentially weighted moving average over the simple moving averageβ
βThe exponentially smoothed moving average addresses both of the problems associated with the simple moving average. First, the exponentially smoothed average assigns a greater weight to the more recent data. Therefore, it is a weighted moving average. But while it assigns lesser importance to past price data, it does include in its calculation all the data in the life of the instrument. In addition, the user is able to adjust the weighting to give greater or lesser weight to the most recent dayβs price, which is added to a percentage of the previous dayβs value. The sum of both percentage values adds up to 100.β
The function βMovingAverageCrossStrategy()β takes following inputs β
stock_symbol β(str) stock ticker as on Yahoo finance. Eg: 'ULTRACEMCO.NS'
start_date β (str)start analysis from this date (format: 'YYYY-MM-DD') Eg: '2018-01-01'.
end_dateβ (str)end analysis on this date (format: 'YYYY-MM-DD') Eg: '2020-01-01'.
short_windowβ (int)look-back period for short-term moving average. Eg: 5, 10, 20
long_window β (int)look-back period for long-term moving average. Eg: 50, 100, 200
moving_avgβ (str)the type of moving average to use ('SMA' or 'EMA').
display_table β (bool)whether to display the date and price table at buy/sell positions(True/False).
Now, letβs test our script on last 4 years of HDFC bank stock. Weβll be using 50-day and 200-day SMA crossover strategy.
Input:
MovingAverageCrossStrategy('HDFC.NS', '2016-08-31', '2020-08-31', 50, 200, 'SMA', display_table = True)
Output:
How about Fortis Healthcare stock? This time we analyze the past 1 year of data and consider 20-day and 50-day EMA Crossover. Also, this time we wonβt be displaying the table.
Input:
MovingAverageCrossStrategy('FORTIS.NS', '2019-08-31', '2020-08-31', 20, 50, 'EMA', display_table = False)
Output:
Due to the fundamental difference in the way they are calculated, EMA reacts quickly to the price changes while SMA is comparatively slow to react. But, one is not necessarily better than another. Each trader must decide which MA is better for his or her particular strategy. In general, shorter-term traders tend to use EMAs because they want to be alerted as soon as the price is moving the other way. On the other hand, longer-term traders tend to rely on SMAs since these investors arenβt rushing to act and prefer to be less actively engaged in their trades.
Beware! As a trend-following indicators, moving averages work in markets that have clear, long term trends. They donβt work that well in markets that can be very choppy for long periods of time. Moral of the story β moving averages are not a one-size-fits-all holy grail. In fact, there is no perfect indicator or a strategy that will guarantee success on each investment in all circumstances. Quantitative traders often use a variety of technical indicators and their combinations to come up with different strategies. In my subsequent articles, I will try to introduce some of these technical indicators.
In this article, I showed how to build a powerful tool to perform technical analysis and generate trade signals using moving average crossover strategy. This script can be used for investigating other company stocks by simply changing the argument to the function MovingAverageCrossStrategy().
This is only the beginning, it is possible to create much more sophisticated strategies which Iβll be looking forward to.
Incorporate more strategies based on indicators like Bollinger bands, Moving Average Convergence Divergence (MACD), Relative Strength Index(RSI) etc.
Perform backtesting to evaluate the performance of different strategies using appropriate metrics.
QuantInsti blogsInvestopediaYahoo FinanceMoving Averages Simplified by Clif DrokeTechnical Analysis of the Financial Markets by John J. Murphy
QuantInsti blogs
Investopedia
Yahoo Finance
Moving Averages Simplified by Clif Droke
Technical Analysis of the Financial Markets by John J. Murphy
You may also want to check my other article β
|
[
{
"code": null,
"e": 588,
"s": 172,
"text": "β β β β β β β β β β β β β β β β Disclaimer β The trading strategies and related information in this article is for the educational purpose only. All investments and trading in the stock market involve risk. Any decisions related to buying/selling of stocks or other financial instruments should only be made after a thorough research and seeking a professional assistance if required.β β β β β β β β β β β β β β β β"
},
{
"code": null,
"e": 1006,
"s": 588,
"text": "Indicators such as Moving averages(MAs), Bollinger bands, Relative Strength Index(RSI) are mathematical technical analysis tools that traders and investors use to analyze the past and anticipate future price trends and patterns. Where fundamentalists may track economic data, annual reports, or various other measures, quantitative traders and analysts rely on the charts and indicators to help interpret price moves."
},
{
"code": null,
"e": 1332,
"s": 1006,
"text": "The goal when using indicators is to identify trading opportunities. For example, a moving average crossover often signals an upcoming trend change. Applying the moving average crossover strategy to a price chart allows traders to identify areas where the trend changes the direction creating a potential trading opportunity."
},
{
"code": null,
"e": 1484,
"s": 1332,
"text": "Before we begin, you may consider going through below article to get yourself accustomed with some common finance jargons associated with stock market."
},
{
"code": null,
"e": 1495,
"s": 1484,
"text": "medium.com"
},
{
"code": null,
"e": 1683,
"s": 1495,
"text": "A moving average, also called as rolling average or running average is a used to analyze the time-series data by calculating a series of averages of the different subsets of full dataset."
},
{
"code": null,
"e": 2405,
"s": 1683,
"text": "Moving averages are the averages of a series of numeric values. They have a predefined length for the number of values to average and this set of values moves forward as more data is added with time. Given a series of numbers and a fixed subset size, the first element of the moving averages is obtained by taking the average of the initial fixed subset of the number series. Then to obtain subsequent moving averages the subset is βshift forwardβ i.e. exclude the first element of the previous subset and add the element immediately after the previous subset to the new subset keeping the length fixed . Since it involves taking the average of the dataset over time, it is also called a moving mean (MM) or rolling mean."
},
{
"code": null,
"e": 2583,
"s": 2405,
"text": "In the technical analysis of financial data, moving averages(MAs) are among the most widely used trend following indicators that demonstrate the direction of the marketβs trend."
},
{
"code": null,
"e": 2765,
"s": 2583,
"text": "There are many different types of moving averages depending on how the averages are computed. In any time-series data analysis, the most commonly used types of moving averages are β"
},
{
"code": null,
"e": 2792,
"s": 2765,
"text": "Simple Moving Average(SMA)"
},
{
"code": null,
"e": 2821,
"s": 2792,
"text": "Weighted Moving Average(WMA)"
},
{
"code": null,
"e": 2862,
"s": 2821,
"text": "Exponential Moving Average (EMA or EWMA)"
},
{
"code": null,
"e": 3134,
"s": 2862,
"text": "The only noteworthy difference between the various moving averages is the weight assigned to data points in the moving average period. Simple moving averages apply equal weight to all data points. Exponential and weighted averages apply more weight to recent data points."
},
{
"code": null,
"e": 3384,
"s": 3134,
"text": "Among these, Simple Moving Averages(SMAs) and Exponential Moving Averages(EMAs) are arguably the most popular technical analysis tool used by the analysts and traders. In this article, weβll focus primarily on the strategies involving SMAs and EMAs."
},
{
"code": null,
"e": 3758,
"s": 3384,
"text": "Simple Moving Average is one of the core technical indicators used by traders and investors for the technical analysis of a stock, index or securities. Simple moving average is calculated by adding the the closing price of last n number of days and then diving by the number of days(time-period). Before we dive deep, letβs first understand the math behind simple averages."
},
{
"code": null,
"e": 4086,
"s": 3758,
"text": "We have studied how to compute average in school and even in our daily life we often come across the notion of it. Letβs say you are watching a game of cricket and a batsman comes for batting. By looking at his previous 5 match scoresβ 60, 75, 55, 80, 50; you can expect him to score roughly around 60β70 runs in todayβs match."
},
{
"code": null,
"e": 4609,
"s": 4086,
"text": "By calculating the average of a batsman from his last 5 matches, you were able to make a crude prediction that heβll score this much runs today. Although, this is a rough estimation and doesnβt guarantee that heβll score exactly same runs, but still the chances are high. Likewise, SMA helps in predicting the future trend and determine whether an asset price will continue or reverse a bull or bear trend. The SMA is usually used to identify trend direction, but it can also be used to generate potential trading signals."
},
{
"code": null,
"e": 4702,
"s": 4609,
"text": "Calculating Simple moving averages β The formula for calculating the SMA is straightforward:"
},
{
"code": null,
"e": 4804,
"s": 4702,
"text": "The simple moving average = (sum of the an asset price over the past n periods) / (number of periods)"
},
{
"code": null,
"e": 4961,
"s": 4804,
"text": "All elements in the SMA have the same weightage. If the moving average period is 5, then each element in the SMA will have a 20% (1/5) weightage in the SMA."
},
{
"code": null,
"e": 5139,
"s": 4961,
"text": "βn periodsβ can be anything. You can have a 200 day simple moving average, a 100 hour simple moving average, a 5 day simple moving average, a 26 week simple moving average, etc."
},
{
"code": null,
"e": 5235,
"s": 5139,
"text": "Now that we have accustomed ourselves with the basics, letβs jump to the Python implementation."
},
{
"code": null,
"e": 5496,
"s": 5235,
"text": "For this example, I have taken the 2 years of historical data of the Closing Price of UltraTech Cement Limited stock(ULTRACEMCO as registered on NSE) from 1st Feb 2018 to 1st Feb 2020. You may choose your own set of stocks and the time period for the analysis."
},
{
"code": null,
"e": 5594,
"s": 5496,
"text": "Letβs began by extracting the stock price data from Yahoo Finance by using Pandas-datareader API."
},
{
"code": null,
"e": 5626,
"s": 5594,
"text": "Importing necessary libraries β"
},
{
"code": null,
"e": 5718,
"s": 5626,
"text": "import numpy as np import pandas as pdimport matplotlib.pyplot as pltimport datetimeimport "
},
{
"code": null,
"e": 5811,
"s": 5718,
"text": "Extracting closing price data of UltraTech Cement stock for the aforementioned time-period β"
},
{
"code": null,
"e": 6147,
"s": 5811,
"text": "# import packageimport pandas_datareader.data as web# set start and end dates start = datetime.datetime(2018, 2, 1) end = datetime.datetime(2020, 2, 1) # extract the closing price dataultratech_df = web.DataReader(['ULTRACEMCO.NS'], 'yahoo', start = start, end = end)['Close']ultratech_df.columns = {'Close Price'}ultratech_df.head(10)"
},
{
"code": null,
"e": 6323,
"s": 6147,
"text": "Note that SMAs are calculated on closing prices and not adjusted close because we want the trade signal to be generated on the price data and not influenced by dividends paid."
},
{
"code": null,
"e": 6398,
"s": 6323,
"text": "Observe general price variation of the closing price for the give period β"
},
{
"code": null,
"e": 6498,
"s": 6398,
"text": "ultratech_df[βClose Priceβ].plot(figsize = (15, 8))plt.grid()plt.ylabel(\"Price in Rupees\"plt.show()"
},
{
"code": null,
"e": 6622,
"s": 6498,
"text": "Create new columns in our dataframe for both the long(i.e. 50 days) and short (i.e 20 days) simple moving averages (SMAs) β"
},
{
"code": null,
"e": 6950,
"s": 6622,
"text": "# create 20 days simple moving average columnultratech_df[β20_SMAβ] = ultratech_df[βClose Priceβ].rolling(window = 20, min_periods = 1).mean()# create 50 days simple moving average columnultratech_df[β50_SMAβ] = ultratech_df[βClose Priceβ].rolling(window = 50, min_periods = 1).mean()# display first few rowsultratech_df.head()"
},
{
"code": null,
"e": 7171,
"s": 6950,
"text": "In Pandas, dataframe.rolling() function provides the feature of rolling window calculations. min_periods parameter specifies the minimum number of observations in window required to have a value (otherwise result is NA)."
},
{
"code": null,
"e": 7292,
"s": 7171,
"text": "Now that we have 20-days and 50-days SMAs, next we see how to strategize this information to generate the trade signals."
},
{
"code": null,
"e": 7762,
"s": 7292,
"text": "There are several ways in which stock market analysts and investors can use moving averages to analyse price trends and predict upcoming change of trends. There are vast varieties of the moving average strategies that can be developed using different types of moving averages. In this article, Iβve tried to demonstrate well-known simplistic yet effective momentum strategies β Simple Moving Average Crossover strategy and Exponential Moving Average Crossover strategy."
},
{
"code": null,
"e": 8513,
"s": 7762,
"text": "In the statistics of time-series, and in particular the Stock market technical analysis, a moving-average crossover occurs when on plotting, the two moving averages each based on different time-periods tend to cross. This indicator uses two (or more) moving averages β a faster moving average(short-term) and a slower(long-term) moving average. The faster moving average may be 5-, 10- or 25-day period while the slower moving average can be 50-, 100- or 200-day period. A short term moving average is faster because it only considers prices over short period of time and is thus more reactive to daily price changes. On the other hand, a long-term moving average is deemed slower as it encapsulates prices over a longer period and is more lethargic."
},
{
"code": null,
"e": 8869,
"s": 8513,
"text": "A moving average, as a line by itself, is often overlaid in price charts to indicate price trends. A crossover occurs when a faster moving average (i.e. a shorter period moving average) crosses a slower moving average (i.e. a longer period moving average). In stock trading, this meeting point can be used as a potential indicator to buy or sell an asset."
},
{
"code": null,
"e": 8977,
"s": 8869,
"text": "When the short term moving average crosses above the long term moving average, this indicates a buy signal."
},
{
"code": null,
"e": 9099,
"s": 8977,
"text": "Contrary, when the short term moving average crosses below the long term moving average, it may be a good moment to sell."
},
{
"code": null,
"e": 9235,
"s": 9099,
"text": "Having equipped with the necessary theory, now letβs continue our Python implementation wherein weβll try to incorporate this strategy."
},
{
"code": null,
"e": 9450,
"s": 9235,
"text": "In our existing pandas dataframe, create a new column βSignalβ such that if 20-day SMA is greater than 50-day SMA then set Signal value as 1 else when 50-day SMA is greater than 20-day SMA then set itβs value as 0."
},
{
"code": null,
"e": 9571,
"s": 9450,
"text": "ultratech_df['Signal'] = 0.0ultratech_df['Signal'] = np.where(ultratech_df['20_SMA'] > ultratech_df['50_SMA'], 1.0, 0.0)"
},
{
"code": null,
"e": 9950,
"s": 9571,
"text": "From these βSignalβ values, the position orders can be generated to represent trading signals. Crossover happens when the faster moving average and the slower moving average cross, or in other words the βSignalβ changes from 0 to 1 (or 1 to 0). So, to incorporate this information, create a new column βPositionβ which nothing but a day-to-day difference of the βSignalβ column."
},
{
"code": null,
"e": 10050,
"s": 9950,
"text": "ultratech_df[βPositionβ] = ultratech_df[βSignalβ].diff()# display first few rowsultratech_df.head()"
},
{
"code": null,
"e": 10253,
"s": 10050,
"text": "When βPositionβ = 1, it implies that the Signal has changed from 0 to 1 meaning a short-term(faster) moving average has crossed above the long-term(slower) moving average, thereby triggering a buy call."
},
{
"code": null,
"e": 10458,
"s": 10253,
"text": "When βPositionβ = -1, it implies that the Signal has changed from 1 to 0 meaning a short-term(faster) moving average has crossed below the long-term(slower) moving average, thereby triggering a sell call."
},
{
"code": null,
"e": 10519,
"s": 10458,
"text": "Now letβs visualize this using a plot to make it more clear."
},
{
"code": null,
"e": 11359,
"s": 10519,
"text": "plt.figure(figsize = (20,10))# plot close price, short-term and long-term moving averages ultratech_df[βClose Priceβ].plot(color = βkβ, label= βClose Priceβ) ultratech_df[β20_SMAβ].plot(color = βbβ,label = β20-day SMAβ) ultratech_df[β50_SMAβ].plot(color = βgβ, label = β50-day SMAβ)# plot βbuyβ signalsplt.plot(ultratech_df[ultratech_df[βPositionβ] == 1].index, ultratech_df[β20_SMAβ][ultratech_df[βPositionβ] == 1], β^β, markersize = 15, color = βgβ, label = 'buy')# plot βsellβ signalsplt.plot(ultratech_df[ultratech_df[βPositionβ] == -1].index, ultratech_df[β20_SMAβ][ultratech_df[βPositionβ] == -1], βvβ, markersize = 15, color = βrβ, label = 'sell')plt.ylabel('Price in Rupees', fontsize = 15 )plt.xlabel('Date', fontsize = 15 )plt.title('ULTRACEMCO', fontsize = 20)plt.legend()plt.grid()plt.show()"
},
{
"code": null,
"e": 11982,
"s": 11359,
"text": "As you can see in the above plot, the blue line represents the faster moving average(20 day SMA), the green line represents the slower moving average(50 day SMA) and the black line represents the actual closing price. If you carefully observe, these moving averages are nothing but the smoothed versions of the actual price, but lagging by certain period of time. The short-term moving average closely resembles the actual price which perfectly makes sense as it takes into consideration more recent prices. In contrast, the long-term moving average has comparatively more lag and loosely resembles the actual price curve."
},
{
"code": null,
"e": 12487,
"s": 11982,
"text": "A signal to buy (as represented by green up-triangle) is triggered when the fast moving average crosses above the slow moving average. This shows a shift in trend i.e. the average price over last 20 days has risen above the average price of past 50 days. Likewise, a signal to sell(as represented by red down-triangle) is triggered when the fast moving average crosses below the slow moving average indicating that the average price in last 20 days has fallen below the average price of the last 50 days."
},
{
"code": null,
"e": 13062,
"s": 12487,
"text": "So far we have discussed the moving average crossover strategy using the simple moving averages(SMAs). It is straightforward to observe that SMA time-series are much less noisy than the original price. However, this comes at a cost β SMA lag the original price, which means that changes in the trend are only seen with a delay of L days. How much is this lag L? For a SMA moving average calculated using M days, the lag is roughly around M/2 days. Thus, if we are using a 50 days SMA, this means we may be late by almost 25 days, which can significantly affect our strategy."
},
{
"code": null,
"e": 13388,
"s": 13062,
"text": "One way to reduce the lag induced by the use of the SMA is to use Exponential Moving Average(EMA). Exponential moving averages give more weight to the most recent periods. This makes them more reliable than SMAs as they are comparatively better representation of the recent performance of the asset. The EMA is calculated as:"
},
{
"code": null,
"e": 13454,
"s": 13388,
"text": "EMA [today] = (Ξ± x Price [today] ) + ((1 β Ξ±) x EMA [yesterday] )"
},
{
"code": null,
"e": 13643,
"s": 13454,
"text": "Where: Ξ± = 2/(N + 1)N = the length of the window (moving average period)EMA [today] = the current EMA valuePrice [today] = the current closing priceEMA [yesterday] = the previous EMA value"
},
{
"code": null,
"e": 13855,
"s": 13643,
"text": "Although the calculation for an EMA looks bit daunting, in practice itβs simple. In fact, itβs easier to calculate than SMA, and besides, the Pandas ewm functionality will do it for you in a single-line of code!"
},
{
"code": null,
"e": 14032,
"s": 13855,
"text": "Having understood the basics, letβs try to incorporate EMAs in place of SMAs in our moving average strategy. Weβre going to use the same code as above, with some minor changes."
},
{
"code": null,
"e": 15833,
"s": 14032,
"text": "# set start and end datesstart = datetime.datetime(2018, 2, 1)end = datetime.datetime(2020, 2, 1)# extract the daily closing price dataultratech_df = web.DataReader(['ULTRACEMCO.NS'], 'yahoo', start = start, end = end)['Close']ultratech_df.columns = {'Close Price'}# Create 20 days exponential moving average columnultratech_df['20_EMA'] = ultratech_df['Close Price'].ewm(span = 20, adjust = False).mean()# Create 50 days exponential moving average columnultratech_df['50_EMA'] = ultratech_df['Close Price'].ewm(span = 50, adjust = False).mean()# create a new column 'Signal' such that if 20-day EMA is greater # than 50-day EMA then set Signal as 1 else 0 ultratech_df['Signal'] = 0.0 ultratech_df['Signal'] = np.where(ultratech_df['20_EMA'] > ultratech_df['50_EMA'], 1.0, 0.0)# create a new column 'Position' which is a day-to-day difference of # the 'Signal' columnultratech_df['Position'] = ultratech_df['Signal'].diff()plt.figure(figsize = (20,10))# plot close price, short-term and long-term moving averages ultratech_df['Close Price'].plot(color = 'k', lw = 1, label = 'Close Price') ultratech_df['20_EMA'].plot(color = 'b', lw = 1, label = '20-day EMA') ultratech_df['50_EMA'].plot(color = 'g', lw = 1, label = '50-day EMA')# plot βbuyβ and 'sell' signalsplt.plot(ultratech_df[ultratech_df[βPositionβ] == 1].index, ultratech_df[β20_EMAβ][ultratech_df[βPositionβ] == 1], β^β, markersize = 15, color = βgβ, label = 'buy')plt.plot(ultratech_df[ultratech_df[βPositionβ] == -1].index, ultratech_df[β20_EMAβ][ultratech_df[βPositionβ] == -1], βvβ, markersize = 15, color = βrβ, label = 'sell')plt.ylabel('Price in Rupees', fontsize = 15 )plt.xlabel('Date', fontsize = 15 )plt.title('ULTRACEMCO - EMA Crossover', fontsize = 20)plt.legend()plt.grid()plt.show()"
},
{
"code": null,
"e": 16077,
"s": 15833,
"text": "The following extract from John J. Murphyβs work, βTechnical Analysis of the Financial Marketsβ published by the New York Institute of Finance, explains the advantage of the exponentially weighted moving average over the simple moving averageβ"
},
{
"code": null,
"e": 16698,
"s": 16077,
"text": "βThe exponentially smoothed moving average addresses both of the problems associated with the simple moving average. First, the exponentially smoothed average assigns a greater weight to the more recent data. Therefore, it is a weighted moving average. But while it assigns lesser importance to past price data, it does include in its calculation all the data in the life of the instrument. In addition, the user is able to adjust the weighting to give greater or lesser weight to the most recent dayβs price, which is added to a percentage of the previous dayβs value. The sum of both percentage values adds up to 100.β"
},
{
"code": null,
"e": 16767,
"s": 16698,
"text": "The function βMovingAverageCrossStrategy()β takes following inputs β"
},
{
"code": null,
"e": 16841,
"s": 16767,
"text": "stock_symbol β(str) stock ticker as on Yahoo finance. Eg: 'ULTRACEMCO.NS'"
},
{
"code": null,
"e": 16930,
"s": 16841,
"text": "start_date β (str)start analysis from this date (format: 'YYYY-MM-DD') Eg: '2018-01-01'."
},
{
"code": null,
"e": 17012,
"s": 16930,
"text": "end_dateβ (str)end analysis on this date (format: 'YYYY-MM-DD') Eg: '2020-01-01'."
},
{
"code": null,
"e": 17093,
"s": 17012,
"text": "short_windowβ (int)look-back period for short-term moving average. Eg: 5, 10, 20"
},
{
"code": null,
"e": 17176,
"s": 17093,
"text": "long_window β (int)look-back period for long-term moving average. Eg: 50, 100, 200"
},
{
"code": null,
"e": 17245,
"s": 17176,
"text": "moving_avgβ (str)the type of moving average to use ('SMA' or 'EMA')."
},
{
"code": null,
"e": 17346,
"s": 17245,
"text": "display_table β (bool)whether to display the date and price table at buy/sell positions(True/False)."
},
{
"code": null,
"e": 17467,
"s": 17346,
"text": "Now, letβs test our script on last 4 years of HDFC bank stock. Weβll be using 50-day and 200-day SMA crossover strategy."
},
{
"code": null,
"e": 17474,
"s": 17467,
"text": "Input:"
},
{
"code": null,
"e": 17578,
"s": 17474,
"text": "MovingAverageCrossStrategy('HDFC.NS', '2016-08-31', '2020-08-31', 50, 200, 'SMA', display_table = True)"
},
{
"code": null,
"e": 17586,
"s": 17578,
"text": "Output:"
},
{
"code": null,
"e": 17759,
"s": 17586,
"text": "How about Fortis Healtcare stock? This time we analyze past 1 year of data and consider 20-days and 50-days EMA Crossover. Also, this time we wonβt be displaying the table."
},
{
"code": null,
"e": 17766,
"s": 17759,
"text": "Input:"
},
{
"code": null,
"e": 17872,
"s": 17766,
"text": "MovingAverageCrossStrategy('FORTIS.NS', '2019-08-31', '2020-08-31', 20, 50, 'EMA', display_table = False)"
},
{
"code": null,
"e": 17880,
"s": 17872,
"text": "Output:"
},
{
"code": null,
"e": 18444,
"s": 17880,
"text": "Due to the fundamental difference in the way they are calculated, EMA reacts quickly to the price changes while SMA is comparatively slow to react. But, one is not necessarily better than another. Each trader must decide which MA is better for his or her particular strategy. In general, shorter-term traders tend to use EMAs because they want to be alerted as soon as the price is moving the other way. On the other hand, longer-term traders tend to rely on SMAs since these investors arenβt rushing to act and prefer to be less actively engaged in their trades."
},
{
"code": null,
"e": 19051,
"s": 18444,
"text": "Beware! As a trend-following indicators, moving averages work in markets that have clear, long term trends. They donβt work that well in markets that can be very choppy for long periods of time. Moral of the story β moving averages are not a one-size-fits-all holy grail. In fact, there is no perfect indicator or a strategy that will guarantee success on each investment in all circumstances. Quantitative traders often use a variety of technical indicators and their combinations to come up with different strategies. In my subsequent articles, I will try to introduce some of these technical indicators."
},
{
"code": null,
"e": 19345,
"s": 19051,
"text": "In this article, I showed how to build a powerful tool to perform technical analysis and generate trade signals using moving average crossover strategy. This script can be used for investigating other company stocks by simply changing the argument to the function MovingAverageCrossStrategy()."
},
{
"code": null,
"e": 19467,
"s": 19345,
"text": "This is only the beginning, it is possible to create much more sophisticated strategies which Iβll be looking forward to."
},
{
"code": null,
"e": 19617,
"s": 19467,
"text": "Incorporate more strategies based on indicators like Bollinger bands, Moving Average Convergence Divergence (MACD), Relative Strength Index(RSI) etc."
},
{
"code": null,
"e": 19716,
"s": 19617,
"text": "Perform backtesting to evaluate the performance of different strategies using appropriate metrics."
},
{
"code": null,
"e": 19859,
"s": 19716,
"text": "QuantInsti blogsInvestopediaYahoo FinanceMoving Averages Simplified by Clif DrokeTechnical Analysis of the Financial Markets by John J. Murphy"
},
{
"code": null,
"e": 19876,
"s": 19859,
"text": "QuantInsti blogs"
},
{
"code": null,
"e": 19889,
"s": 19876,
"text": "Investopedia"
},
{
"code": null,
"e": 19903,
"s": 19889,
"text": "Yahoo Finance"
},
{
"code": null,
"e": 19944,
"s": 19903,
"text": "Moving Averages Simplified by Clif Droke"
},
{
"code": null,
"e": 20006,
"s": 19944,
"text": "Technical Analysis of the Financial Markets by John J. Murphy"
}
] |
Divide numbers from two columns and display result in a new column with MySQL
|
Let us first create a table β
mysql> create table DemoTable719 (FirstNumber int,SecondNumber int);
Query OK, 0 rows affected (0.57 sec)
Insert some records in the table using insert command β
mysql> insert into DemoTable719 values(20,10);
Query OK, 1 row affected (0.18 sec)
mysql> insert into DemoTable719 values(500,50);
Query OK, 1 row affected (0.22 sec)
mysql> insert into DemoTable719 values(400,20);
Query OK, 1 row affected (0.11 sec)
Display all records from the table using select statement β
mysql> select *from DemoTable719;
This will produce the following output β
+-------------+--------------+
| FirstNumber | SecondNumber |
+-------------+--------------+
| 20 | 10 |
| 500 | 50 |
| 400 | 20 |
+-------------+--------------+
3 rows in set (0.00 sec)
Following is the query to divide numbers from two columns and display result in a new column β
mysql> select *,FirstNumber/SecondNumber AS Result from DemoTable719;
This will produce the following output −
+-------------+--------------+---------+
| FirstNumber | SecondNumber | Result |
+-------------+--------------+---------+
| 20 | 10 | 2.0000 |
| 500 | 50 | 10.0000 |
| 400 | 20 | 20.0000 |
+-------------+--------------+---------+
3 rows in set (0.00 sec)
|
[
{
"code": null,
"e": 1092,
"s": 1062,
"text": "Let us first create a table β"
},
{
"code": null,
"e": 1198,
"s": 1092,
"text": "mysql> create table DemoTable719 (FirstNumber int,SecondNumber int);\nQuery OK, 0 rows affected (0.57 sec)"
},
{
"code": null,
"e": 1254,
"s": 1198,
"text": "Insert some records in the table using insert command β"
},
{
"code": null,
"e": 1505,
"s": 1254,
"text": "mysql> insert into DemoTable719 values(20,10);\nQuery OK, 1 row affected (0.18 sec)\nmysql> insert into DemoTable719 values(500,50);\nQuery OK, 1 row affected (0.22 sec)\nmysql> insert into DemoTable719 values(400,20);\nQuery OK, 1 row affected (0.11 sec)"
},
{
"code": null,
"e": 1565,
"s": 1505,
"text": "Display all records from the table using select statement β"
},
{
"code": null,
"e": 1599,
"s": 1565,
"text": "mysql> select *from DemoTable719;"
},
{
"code": null,
"e": 1640,
"s": 1599,
"text": "This will produce the following output β"
},
{
"code": null,
"e": 1882,
"s": 1640,
"text": "+-------------+--------------+\n| FirstNumber | SecondNumber |\n+-------------+--------------+\n| 20 | 10 |\n| 500 | 50 |\n| 400 | 20 |\n+-------------+--------------+\n3 rows in set (0.00 sec)"
},
{
"code": null,
"e": 1977,
"s": 1882,
"text": "Following is the query to divide numbers from two columns and display result in a new column β"
},
{
"code": null,
"e": 2047,
"s": 1977,
"text": "mysql> select *,FirstNumber/SecondNumber AS Result from DemoTable719;"
},
{
"code": null,
"e": 2094,
"s": 2047,
"text": "This will produce the following output 7minus;"
},
{
"code": null,
"e": 2406,
"s": 2094,
"text": "+-------------+--------------+---------+\n| FirstNumber | SecondNumber | Result |\n+-------------+--------------+---------+\n| 20 | 10 | 2.0000 |\n| 500 | 50 | 10.0000 |\n| 400 | 20 | 20.0000 |\n+-------------+--------------+---------+\n3 rows in set (0.00 sec)"
}
] |
SciPy - Spatial
|
The scipy.spatial package can compute Triangulations, Voronoi Diagrams and Convex Hulls of a set of points, by leveraging the Qhull library. Moreover, it contains KDTree implementations for nearest-neighbor point queries and utilities for distance computations in various metrics.
Let us understand what Delaunay Triangulations are and how they are used in SciPy.
In mathematics and computational geometry, a Delaunay triangulation for a given set P of discrete points in a plane is a triangulation DT(P) such that no point in P is inside the circumcircle of any triangle in DT(P).
We can compute the same through SciPy. Let us consider the following example.
from scipy.spatial import Delaunay
points = np.array([[0, 4], [2, 1.1], [1, 3], [1, 2]])
tri = Delaunay(points)
import matplotlib.pyplot as plt
plt.triplot(points[:,0], points[:,1], tri.simplices.copy())
plt.plot(points[:,0], points[:,1], 'o')
plt.show()
The above program will generate the following output.
Let us understand what Coplanar Points are and how they are used in SciPy.
Coplanar points are three or more points that lie in the same plane. Recall that a plane is a flat surface, which extends without end in all directions. It is usually shown in math textbooks as a four-sided figure.
Let us see how we can find this using SciPy. Let us consider the following example.
from scipy.spatial import Delaunay
points = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [1, 1]])
tri = Delaunay(points)
print tri.coplanar
The above program will generate the following output.
array([[4, 0, 3]], dtype = int32)
This means that point 4 resides near triangle 0 and vertex 3, but is not included in the triangulation.
Let us understand what convex hulls are and how they are used in SciPy.
In mathematics, the convex hull or convex envelope of a set of points X in the Euclidean plane or in a Euclidean space (or, more generally, in an affine space over the reals) is the smallest convex set that contains X.
Let us consider the following example to understand it in detail.
from scipy.spatial import ConvexHull
points = np.random.rand(10, 2)   # 10 random points in 2-D
hull = ConvexHull(points)
import matplotlib.pyplot as plt
plt.plot(points[:,0], points[:,1], 'o')
for simplex in hull.simplices:
plt.plot(points[simplex,0], points[simplex,1], 'k-')
plt.show()
The above program will generate the following output.
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2168,
"s": 1887,
"text": "The scipy.spatial package can compute Triangulations, Voronoi Diagrams and Convex Hulls of a set of points, by leveraging the Qhull library. Moreover, it contains KDTree implementations for nearest-neighbor point queries and utilities for distance computations in various metrics."
},
{
"code": null,
"e": 2251,
"s": 2168,
"text": "Let us understand what Delaunay Triangulations are and how they are used in SciPy."
},
{
"code": null,
"e": 2469,
"s": 2251,
"text": "In mathematics and computational geometry, a Delaunay triangulation for a given set P of discrete points in a plane is a triangulation DT(P) such that no point in P is inside the circumcircle of any triangle in DT(P)."
},
{
"code": null,
"e": 2551,
"s": 2469,
"text": "We can the compute the same through SciPy. Let us consider the following example."
},
{
"code": null,
"e": 2806,
"s": 2551,
"text": "from scipy.spatial import Delaunay\npoints = np.array([[0, 4], [2, 1.1], [1, 3], [1, 2]])\ntri = Delaunay(points)\nimport matplotlib.pyplot as plt\nplt.triplot(points[:,0], points[:,1], tri.simplices.copy())\nplt.plot(points[:,0], points[:,1], 'o')\nplt.show()"
},
{
"code": null,
"e": 2860,
"s": 2806,
"text": "The above program will generate the following output."
},
{
"code": null,
"e": 2935,
"s": 2860,
"text": "Let us understand what Coplanar Points are and how they are used in SciPy."
},
{
"code": null,
"e": 3150,
"s": 2935,
"text": "Coplanar points are three or more points that lie in the same plane. Recall that a plane is a flat surface, which extends without end in all directions. It is usually shown in math textbooks as a four-sided figure."
},
{
"code": null,
"e": 3234,
"s": 3150,
"text": "Let us see how we can find this using SciPy. Let us consider the following example."
},
{
"code": null,
"e": 3371,
"s": 3234,
"text": "from scipy.spatial import Delaunay\npoints = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [1, 1]])\ntri = Delaunay(points)\nprint tri.coplanar"
},
{
"code": null,
"e": 3425,
"s": 3371,
"text": "The above program will generate the following output."
},
{
"code": null,
"e": 3460,
"s": 3425,
"text": "array([[4, 0, 3]], dtype = int32)\n"
},
{
"code": null,
"e": 3564,
"s": 3460,
"text": "This means that point 4 resides near triangle 0 and vertex 3, but is not included in the triangulation."
},
{
"code": null,
"e": 3636,
"s": 3564,
"text": "Let us understand what convex hulls are and how they are used in SciPy."
},
{
"code": null,
"e": 3855,
"s": 3636,
"text": "In mathematics, the convex hull or convex envelope of a set of points X in the Euclidean plane or in a Euclidean space (or, more generally, in an affine space over the reals) is the smallest convex set that contains X."
},
{
"code": null,
"e": 3921,
"s": 3855,
"text": "Let us consider the following example to understand it in detail."
},
{
"code": null,
"e": 4208,
"s": 3921,
"text": "from scipy.spatial import ConvexHull\npoints = np.random.rand(10, 2) # 30 random points in 2-D\nhull = ConvexHull(points)\nimport matplotlib.pyplot as plt\nplt.plot(points[:,0], points[:,1], 'o')\nfor simplex in hull.simplices:\nplt.plot(points[simplex,0], points[simplex,1], 'k-')\nplt.show()"
},
{
"code": null,
"e": 4262,
"s": 4208,
"text": "The above program will generate the following output."
},
{
"code": null,
"e": 4269,
"s": 4262,
"text": " Print"
},
{
"code": null,
"e": 4280,
"s": 4269,
"text": " Add Notes"
}
] |
Kruskal's Minimum Spanning Tree using STL in C++
|
In this tutorial, we will be discussing a program to understand Kruskal's minimum spanning tree using STL in C++.
For this, we will be provided with a connected, undirected and weighted graph. Our task is to calculate the Minimum spanning tree for the given graph.
Live Demo
#include<bits/stdc++.h>
using namespace std;
typedef pair<int, int> iPair;
//structure for graph
struct Graph{
int V, E;
vector< pair<int, iPair> > edges;
Graph(int V, int E){
this->V = V;
this->E = E;
}
void addEdge(int u, int v, int w){
edges.push_back({w, {u, v}});
}
int kruskalMST();
};
struct DisjointSets{
int *parent, *rnk;
int n;
DisjointSets(int n){
this->n = n;
parent = new int[n+1];
rnk = new int[n+1];
for (int i = 0; i <= n; i++){
rnk[i] = 0;
parent[i] = i;
}
}
int find(int u){
if (u != parent[u])
parent[u] = find(parent[u]);
return parent[u];
}
void merge(int x, int y){
x = find(x), y = find(y);
if (rnk[x] > rnk[y])
parent[y] = x;
else
parent[x] = y;
if (rnk[x] == rnk[y])
rnk[y]++;
}
};
int Graph::kruskalMST(){
int mst_wt = 0;
sort(edges.begin(), edges.end());
DisjointSets ds(V);
vector< pair<int, iPair> >::iterator it;
for (it=edges.begin(); it!=edges.end(); it++){
int u = it->second.first;
int v = it->second.second;
int set_u = ds.find(u);
int set_v = ds.find(v);
if (set_u != set_v){
cout << u << " - " << v << endl;
mst_wt += it->first;
ds.merge(set_u, set_v);
}
}
return mst_wt;
}
int main(){
int V = 9, E = 14;
Graph g(V, E);
g.addEdge(0, 1, 4);
g.addEdge(0, 7, 8);
g.addEdge(1, 2, 8);
g.addEdge(1, 7, 11);
g.addEdge(2, 3, 7);
g.addEdge(2, 8, 2);
g.addEdge(2, 5, 4);
g.addEdge(3, 4, 9);
g.addEdge(3, 5, 14);
g.addEdge(4, 5, 10);
g.addEdge(5, 6, 2);
g.addEdge(6, 7, 1);
g.addEdge(6, 8, 6);
g.addEdge(7, 8, 7);
cout << "Edges of MST are \n";
int mst_wt = g.kruskalMST();
cout << "\nWeight of MST is " << mst_wt;
return 0;
}
Edges of MST are
6 - 7
2 - 8
5 - 6
0 - 1
2 - 5
2 - 3
0 - 7
3 - 4
Weight of MST is 37
|
[
{
"code": null,
"e": 1176,
"s": 1062,
"text": "In this tutorial, we will be discussing a program to understand Kruskalβs minimum spanning tree using STL in C++."
},
{
"code": null,
"e": 1327,
"s": 1176,
"text": "For this, we will be provided with a connected, undirected and weighted graph. Our task is to calculate the Minimum spanning tree for the given graph."
},
{
"code": null,
"e": 1338,
"s": 1327,
"text": " Live Demo"
},
{
"code": null,
"e": 3162,
"s": 1338,
"text": "#include<bits/stdc++.h>\nusing namespace std;\ntypedef pair<int, int> iPair;\n//structure for graph\nstruct Graph{\n int V, E;\n vector< pair<int, iPair> > edges;\n Graph(int V, int E){\n this->V = V;\n this->E = E;\n }\n void addEdge(int u, int v, int w){\n edges.push_back({w, {u, v}});\n }\n int kruskalMST();\n};\nstruct DisjointSets{\n int *parent, *rnk;\n int n;\n DisjointSets(int n){\n this->n = n;\n parent = new int[n+1];\n rnk = new int[n+1];\n for (int i = 0; i <= n; i++){\n rnk[i] = 0;\n parent[i] = i;\n }\n}\nint find(int u){\n if (u != parent[u])\n parent[u] = find(parent[u]);\n return parent[u];\n}\nvoid merge(int x, int y){\n x = find(x), y = find(y);\n if (rnk[x] > rnk[y])\n parent[y] = x;\n else\n parent[x] = y;\n if (rnk[x] == rnk[y])\n rnk[y]++;\n }\n};\nint Graph::kruskalMST(){\n int mst_wt = 0;\n sort(edges.begin(), edges.end());\n DisjointSets ds(V);\n vector< pair<int, iPair> >::iterator it;\n for (it=edges.begin(); it!=edges.end(); it++){\n int u = it->second.first;\n int v = it->second.second;\n int set_u = ds.find(u);\n int set_v = ds.find(v);\n if (set_u != set_v){\n cout << u << \" - \" << v << endl;\n mst_wt += it->first;\n ds.merge(set_u, set_v);\n }\n }\n return mst_wt;\n}\nint main(){\n int V = 9, E = 14;\n Graph g(V, E);\n g.addEdge(0, 1, 4);\n g.addEdge(0, 7, 8);\n g.addEdge(1, 2, 8);\n g.addEdge(1, 7, 11);\n g.addEdge(2, 3, 7);\n g.addEdge(2, 8, 2);\n g.addEdge(2, 5, 4);\n g.addEdge(3, 4, 9);\n g.addEdge(3, 5, 14);\n g.addEdge(4, 5, 10);\n g.addEdge(5, 6, 2);\n g.addEdge(6, 7, 1);\n g.addEdge(6, 8, 6);\n g.addEdge(7, 8, 7);\n cout << \"Edges of MST are \\n\";\n int mst_wt = g.kruskalMST();\n cout << \"\\nWeight of MST is \" << mst_wt;\n return 0;\n}"
},
{
"code": null,
"e": 3247,
"s": 3162,
"text": "Edges of MST are\n6 - 7\n2 - 8\n5 - 6\n0 - 1\n2 - 5\n2 - 3\n0 - 7\n3 - 4\nWeight of MST is 37"
}
] |
Of Astronomy, Stars and Data. A data-driven explanation of the... | by Sowmya Krishnan | Towards Data Science
|
Astronomy is one of the most ancient, data-driven sciences. The late 1800s saw the rise of βAstrophotographyβ, a photography technique employed to capture and create a data-bank of photographic plates of astronomical objects, celestial events, and areas of the night sky.
As early as 1875, Harvard College Observatory began employing an increasing number of women assistants (though this decision was more from an economic perspective than an ethical one) to work with the glass plates. These women would work for six days a week to catalog, compute and interpret data from these plates, thereby mapping the universe.
With an inordinate amount of data being produced using modern instruments, astronomy has become more data driven than ever before. While working on the βstarsβ data set sourced from Rβs βdslabsβ package, I came across an interesting exercise to analyze a set of real astronomical data to inspect properties of stars, the different categories they belong to, their apparent & absolute magnitudes, spectral types and surface temperatures.
A little background in stellar evolution and properties essential to studying and categorizing a star would further help understand this exercise.
Consider two sources of light β a flashlight in your hand and a distant streetlight a certain point away. Superficially, one would come to a conclusion that the flashlight in our hand is brighter than the far away streetlight. In essence, this problem translates to stellar brightness. A casual glance at stars does not help reveal if the star is a nearby glowing ember or a distant large beacon. To distinguish between how bright a star looks and how bright it really is, astronomers make use of the apparent vs absolute magnitude measure.
As is understood from the name, apparent brightness is the relative brightness of a star perceived by an observer on the Earth whereas absolute brightness is the same observerβs perception if all the stars were to be magical placed at the same standard distance.
One way of measuring apparent brightness is in terms of units of photos per second. A more convenient way is to express this as a ratio of a prominent bright star with the actual star. Sirius, the brightest star, has an apparent magnitude of -1.46, while the faintest stars visible to the naked eye have magnitudes of about 6.
The apparent brightness of a star, majorly, depends on two factors:
1. Actual Brightness
2. Distance from the Earth
On the other hand, absolute magnitude reflects the true amount of light emitted by the star. We therefore express this value as the luminosity of the star.
Henry Draper was the first to photograph stellar spectra in the year 1872. The βwomen computersβ at Harvard subsequently carried out his dream of photographing and cataloging all the bright stars in the sky. Spectral classes were soon arranged according to temperature and had letter designations in a sequence that went from hot to cool in the order β O, B, A, F, G, K and M. Type O stars have the highest surface temperatures that can be as hot as 30,000 Kelvins and Type M stars can be as cool as 3,000 Kelvins.
Letβs color the points in the data set by star type. This classification describes the properties of the starβs spectrum, the amount of light produced at various wavelengths.
stars %>% ggplot(aes(log10(temp), magnitude, col=type)) + geom_point() + geom_text(aes(label = star)) + scale_x_reverse() + scale_y_reverse()
A density plot of temperature shows that majority of the stars have a low temperature.
stars %>% ggplot(aes(temp)) + geom_density()
Below give is a scatter plot of the data with temperature on the x-axis and magnitude on the y-axis. Most stars are shown to follow a decreasing exponential trend. These are also known as the main sequence stars that we will talk about later.
stars %>% ggplot(aes(temp, magnitude)) + geom_point()
For various reasons, scientists do not always follow straight conventions when making plots, and astronomers usually transform values of star luminosity and temperature before plotting. Weβll flip the y-axis so that lower values for magnitude will shift towards the top of the axis.
stars %>% ggplot(aes(log10(temp), magnitude)) + geom_point() + scale_x_reverse() + scale_y_reverse()
From Stefan-Boltzmann Law, we know that:
Like black bodies, stars also emit radiations. Hence, we can make use of the above three properties (luminosity, radius and temperature) to compare and categorize stars. Ejnar Hertzsprung and Henry Russells plotted the now famous H-R Diagram which became a fundamental tool of modern astronomy. Plotting spectral classification against absolute magnitude, they found that most stars lay on certain regions in the diagram.
Noticeable empty zones give rise to a question if stars necessarily appear throughout the diagram or only at certain combinations of temperature and luminosity? Turns out that stars falling in regions other than the four highlighted above, are rare or non-existent.
The four main star sequences are as follows:
Main Sequence
The narrow band of stars running from the upper left to the lower left corner forms the Main sequence. These stars the mirror the fusion reactions within the Sunβs core. The larger stars in this sequence (high luminosity, high temperature) undergo rapid fusion reactions. Therefore, higher the position of the star in this sequence, shorter its life. And lower the position, longer the life.
Giants
The cluster of stars located above the Main Sequence in the upper right-hand corner is termed as Giants. These stars mostly appear red in color owing to their large radius and low surface temperatures. Having burnt all the hydrogen into helium, these stars have entered the final stage of their life.
Super Giants
With a radii larger than those of the Red Giants, the Super giants are scattered above the Main Sequence and Giant stars. The Helium-carbon fusion process happens much faster in these stars due to their masses and hence they never become Giants. Super Giants can fuse carbon into oxygen, oxygen into neon, neon into magnesium, magnesium into silicon, and finally, silicon into iron. With an iron core, the super-giant will ultimately die in a supernova explosion. Depending on the original mass of the star, the result would be a neutron star or a black hole.
White Dwarfs
The few stars found in the lower left corner of the Main Sequence stars. The fusion reaction for these stars stops with carbon and the core eventually begins to shrink. The outer layer dissipates and all that remains is the small cool core of the star. White dwarfs have temperatures that result in yellowish-white to bluish-white colors.
Since we can now identify whether specific stars are main sequence stars, red giants or white dwarfs, letβs now label all the stars in our data set to locate some specific stars.
stars %>% ggplot(aes(log10(temp), magnitude)) + geom_point() + geom_text(aes(label = star)) + scale_x_reverse() + scale_y_reverse()
We know that Super Giants are stars with lowest temperature and highest luminosity. Looking at the plot above, we see Betelguese and Antares are the super giants in this dataset. While Van Maanenβs the least luminous star in the sample.
If you look at the plot a little more closely, youβll see the Sun present in the Main sequence of stars. So is Alpha Centauria.
There is a place in the H-R diagram where the stars are just exhausting their nuclear fuel. This has been labelled as the βmain sequence turn-offβ region by the astronomers. This group will contain no main sequence O type stars. Older the cluster, more of the main sequence stars will disappear. They would transform into giants, white dwarfs, or super giants. An important conclusion that can be drawn here is that
The H-R diagram can serve as a tool for dating groups of stars that formed together.
This is an extremely important application that enables astronomers to further probe the ages of stars across galaxies.
|
[
{
"code": null,
"e": 319,
"s": 47,
"text": "Astronomy is one of the most ancient, data-driven sciences. The late 1800s saw the rise of βAstrophotographyβ, a photography technique employed to capture and create a data-bank of photographic plates of astronomical objects, celestial events, and areas of the night sky."
},
{
"code": null,
"e": 665,
"s": 319,
"text": "As early as 1875, Harvard College Observatory began employing an increasing number of women assistants (though this decision was more from an economic perspective than an ethical one) to work with the glass plates. These women would work for six days a week to catalog, compute and interpret data from these plates, thereby mapping the universe."
},
{
"code": null,
"e": 1102,
"s": 665,
"text": "With an inordinate amount of data being produced using modern instruments, astronomy has become more data driven than ever before. While working on the βstarsβ data set sourced from Rβs βdslabsβ package, I came across an interesting exercise to analyze a set of real astronomical data to inspect properties of stars, the different categories they belong to, their apparent & absolute magnitudes, spectral types and surface temperatures."
},
{
"code": null,
"e": 1249,
"s": 1102,
"text": "A little background in stellar evolution and properties essential to studying and categorizing a star would further help understand this exercise."
},
{
"code": null,
"e": 1790,
"s": 1249,
"text": "Consider two sources of light β a flashlight in your hand and a distant streetlight a certain point away. Superficially, one would come to a conclusion that the flashlight in our hand is brighter than the far away streetlight. In essence, this problem translates to stellar brightness. A casual glance at stars does not help reveal if the star is a nearby glowing ember or a distant large beacon. To distinguish between how bright a star looks and how bright it really is, astronomers make use of the apparent vs absolute magnitude measure."
},
{
"code": null,
"e": 2053,
"s": 1790,
"text": "As is understood from the name, apparent brightness is the relative brightness of a star perceived by an observer on the Earth whereas absolute brightness is the same observerβs perception if all the stars were to be magical placed at the same standard distance."
},
{
"code": null,
"e": 2380,
"s": 2053,
"text": "One way of measuring apparent brightness is in terms of units of photos per second. A more convenient way is to express this as a ratio of a prominent bright star with the actual star. Sirius, the brightest star, has an apparent magnitude of -1.46, while the faintest stars visible to the naked eye have magnitudes of about 6."
},
{
"code": null,
"e": 2448,
"s": 2380,
"text": "The apparent brightness of a star, majorly, depends on two factors:"
},
{
"code": null,
"e": 2469,
"s": 2448,
"text": "1. Actual Brightness"
},
{
"code": null,
"e": 2496,
"s": 2469,
"text": "2. Distance from the Earth"
},
{
"code": null,
"e": 2652,
"s": 2496,
"text": "On the other hand, absolute magnitude reflects the true amount of light emitted by the star. We therefore express this value as the luminosity of the star."
},
{
"code": null,
"e": 3167,
"s": 2652,
"text": "Henry Draper was the first to photograph stellar spectra in the year 1872. The βwomen computersβ at Harvard subsequently carried out his dream of photographing and cataloging all the bright stars in the sky. Spectral classes were soon arranged according to temperature and had letter designations in a sequence that went from hot to cool in the order β O, B, A, F, G, K and M. Type O stars have the highest surface temperatures that can be as hot as 30,000 Kelvins and Type M stars can be as cool as 3,000 Kelvins."
},
{
"code": null,
"e": 3342,
"s": 3167,
"text": "Letβs color the points in the data set by star type. This classification describes the properties of the starβs spectrum, the amount of light produced at various wavelengths."
},
{
"code": null,
"e": 3499,
"s": 3342,
"text": "stars %>% ggplot(aes(log10(temp), magnitude, col=type)) + geom_point() + geom_text(aes(label = star)) + scale_x_reverse() + scale_y_reverse()"
},
{
"code": null,
"e": 3586,
"s": 3499,
"text": "A density plot of temperature shows that majority of the stars have a low temperature."
},
{
"code": null,
"e": 3637,
"s": 3586,
"text": "stars %>% ggplot(aes(temp)) + geom_density()"
},
{
"code": null,
"e": 3880,
"s": 3637,
"text": "Below give is a scatter plot of the data with temperature on the x-axis and magnitude on the y-axis. Most stars are shown to follow a decreasing exponential trend. These are also known as the main sequence stars that we will talk about later."
},
{
"code": null,
"e": 3940,
"s": 3880,
"text": "stars %>% ggplot(aes(temp, magnitude)) + geom_point()"
},
{
"code": null,
"e": 4223,
"s": 3940,
"text": "For various reasons, scientists do not always follow straight conventions when making plots, and astronomers usually transform values of star luminosity and temperature before plotting. Weβll flip the y-axis so that lower values for magnitude will shift towards the top of the axis."
},
{
"code": null,
"e": 4336,
"s": 4223,
"text": "stars %>% ggplot(aes(log10(temp), magnitude)) + geom_point() + scale_x_reverse() + scale_y_reverse()"
},
{
"code": null,
"e": 4377,
"s": 4336,
"text": "From Stefan-Boltzmann Law, we know that:"
},
{
"code": null,
"e": 4799,
"s": 4377,
"text": "Like black bodies, stars also emit radiations. Hence, we can make use of the above three properties (luminosity, radius and temperature) to compare and categorize stars. Ejnar Hertzsprung and Henry Russells plotted the now famous H-R Diagram which became a fundamental tool of modern astronomy. Plotting spectral classification against absolute magnitude, they found that most stars lay on certain regions in the diagram."
},
{
"code": null,
"e": 5065,
"s": 4799,
"text": "Noticeable empty zones give rise to a question if stars necessarily appear throughout the diagram or only at certain combinations of temperature and luminosity? Turns out that stars falling in regions other than the four highlighted above, are rare or non-existent."
},
{
"code": null,
"e": 5110,
"s": 5065,
"text": "The four main star sequences are as follows:"
},
{
"code": null,
"e": 5124,
"s": 5110,
"text": "Main Sequence"
},
{
"code": null,
"e": 5516,
"s": 5124,
"text": "The narrow band of stars running from the upper left to the lower left corner forms the Main sequence. These stars the mirror the fusion reactions within the Sunβs core. The larger stars in this sequence (high luminosity, high temperature) undergo rapid fusion reactions. Therefore, higher the position of the star in this sequence, shorter its life. And lower the position, longer the life."
},
{
"code": null,
"e": 5523,
"s": 5516,
"text": "Giants"
},
{
"code": null,
"e": 5824,
"s": 5523,
"text": "The cluster of stars located above the Main Sequence in the upper right-hand corner is termed as Giants. These stars mostly appear red in color owing to their large radius and low surface temperatures. Having burnt all the hydrogen into helium, these stars have entered the final stage of their life."
},
{
"code": null,
"e": 5837,
"s": 5824,
"text": "Super Giants"
},
{
"code": null,
"e": 6397,
"s": 5837,
"text": "With a radii larger than those of the Red Giants, the Super giants are scattered above the Main Sequence and Giant stars. The Helium-carbon fusion process happens much faster in these stars due to their masses and hence they never become Giants. Super Giants can fuse carbon into oxygen, oxygen into neon, neon into magnesium, magnesium into silicon, and finally, silicon into iron. With an iron core, the super-giant will ultimately die in a supernova explosion. Depending on the original mass of the star, the result would be a neutron star or a black hole."
},
{
"code": null,
"e": 6410,
"s": 6397,
"text": "White Dwarfs"
},
{
"code": null,
"e": 6749,
"s": 6410,
"text": "The few stars found in the lower left corner of the Main Sequence stars. The fusion reaction for these stars stops with carbon and the core eventually begins to shrink. The outer layer dissipates and all that remains is the small cool core of the star. White dwarfs have temperatures that result in yellowish-white to bluish-white colors."
},
{
"code": null,
"e": 6928,
"s": 6749,
"text": "Since we can now identify whether specific stars are main sequence stars, red giants or white dwarfs, letβs now label all the stars in our data set to locate some specific stars."
},
{
"code": null,
"e": 7075,
"s": 6928,
"text": "stars %>% ggplot(aes(log10(temp), magnitude)) + geom_point() + geom_text(aes(label = star)) + scale_x_reverse() + scale_y_reverse()"
},
{
"code": null,
"e": 7312,
"s": 7075,
"text": "We know that Super Giants are stars with lowest temperature and highest luminosity. Looking at the plot above, we see Betelguese and Antares are the super giants in this dataset. While Van Maanenβs the least luminous star in the sample."
},
{
"code": null,
"e": 7440,
"s": 7312,
"text": "If you look at the plot a little more closely, youβll see the Sun present in the Main sequence of stars. So is Alpha Centauria."
},
{
"code": null,
"e": 7856,
"s": 7440,
"text": "There is a place in the H-R diagram where the stars are just exhausting their nuclear fuel. This has been labelled as the βmain sequence turn-offβ region by the astronomers. This group will contain no main sequence O type stars. Older the cluster, more of the main sequence stars will disappear. They would transform into giants, white dwarfs, or super giants. An important conclusion that can be drawn here is that"
},
{
"code": null,
"e": 7941,
"s": 7856,
"text": "The H-R diagram can serve as a tool for dating groups of stars that formed together."
}
] |
How to print content of JavaScript object?
|
To print the content of a JavaScript object, try to run the following code. The code prints the object, with its properties and values β
<!DOCTYPE html>
<html>
<body>
<script>
function display(obj) {
var res = '';
for (var a in obj) {
res += a + ': ' + obj[a] + '\n';
}
alert(res);
}
var newObject = {'Amit': 70778, 'Sachin': 87547, 'Saurav': 57535};
display(newObject);
</script>
</body>
</html>
|
[
{
"code": null,
"e": 1199,
"s": 1062,
"text": "To print the content of a JavaScript object, try to run the following code. The code prints the object, with its properties and values β"
},
{
"code": null,
"e": 1577,
"s": 1199,
"text": "<!DOCTYPE html>\n<html>\n <body>\n <script>\n function display(obj) {\n var res = '';\n for (var a in obj) {\n res += a + ': ' + obj[a] + '\\n';\n }\n alert(res);\n }\n\n var newObject = {'Amit': 70778, 'Sachin': 87547, 'Saurav': 57535};\n display(newObject);\n </script>\n </body>\n</html>"
}
] |
Generating text with Recurrent Neural Networks based on the work of F. Pessoa | Towards Data Science
|
Sequences of discrete tokens can be found in many applications, namely words in a text, notes in a musical composition, pixels in an image, actions in a reinforcement learning agent, etc [1]. These sequences often show a strong correlation between consecutive or nearby tokens. The correlations on words in a sentence or characters in words express the underlying semantics and language characteristics. The next token in the sequence x_n can be modeled as:
where x_i represents the ith token in the sequence. In Natural Language Processing (NLP), these are defined as language models. Usually, each token stands for a separate word or n-gram. The output generated is a probability distribution from which we can sample to generate the next token in the sequence. These models are also known as recurrent, as we can apply this generative process recurrently to create entire new sequences of tokens.
One particular type of generative model often used to tackle problems with sequences of discrete tokens is Recurrent Neural Networks (RNN). In a simpler neural network, a fixed-dimensional feature representation is transformed several times by different non-linear functions. In an RNN, these transformations are also repeated in time, which means that at every time step, a new input is processed, and a new output is generated. They can effectively capture semantically rich representations of the input sequences [2]. RNN showed this capacity in different settings, such as generating structured text, original images (on a per pixels basis), or even modeling user behavior on online services.
Our task is to generate original text that resembles a training corpus. It is an unsupervised task, as we do not have access to any labeling or target variable. We start by creating a word embedding that maps each character to a vector with a parameterized dimension. For each character, the model looks up the embedding and feeds the result to a stack of Long Short-Term Memory (LSTM) layers, a specific type of RNN. These were developed to extend the traditional capacity of RNNs to model long-term dependencies and counter the vanishing gradient problem. The output of our network is a dense layer with a number of units equal to the vocabulary size. We did not define an activation function for this layer; it simply outputs one logit for each character in the vocabulary. We use these values to later sample from a categorical distribution.
In this article, we use the work of Fernando Pessoa, one of the most significant literary figures of the 20th century and one of the greatest poets in the Portuguese language. This dataset is now publicly available on Kaggle and consists of more than 4300 poems, essays, and other writings [3].
The code is also available on Kaggle and GitHub.
This article belongs to a series of articles on Deep Learning using TensorFlow:
Transfer Learning and Data Augmentation applied to the Simpsons Image Dataset
Generating Text With Recurrent Neural Networks based on the Work of F. Pessoa
Neural Machine Translation using a Seq2Seq Architecture and Attention (ENG to POR)
Residual Networks from Scratch Applied to Computer Vision
The dataset comprises several texts written by the author under his own name but also using different heteronyms and pseudonyms. Each one has his own style of writing, which could be interesting to learn separately. Nevertheless, to efficiently train Deep Neural Networks (DNN), we need a large dataset and that was the reason to build a single model.
F. Pessoa lived part of his youth in South Africa, where he was exposed to the English language. That is why part of his work is written in English. To avoid introducing noise, we remove most of the English texts from the training dataset.
import pandas as pdimport numpy as npfrom tensorflow.keras.preprocessing.text import Tokenizerimport tensorflow as tfimport astimport osimport jsonimport matplotlib.pyplot as pltfrom nltk import tokenizeimport seaborn as snsf_pessoa = pd.read_csv(os.getcwd() + '/f_pessoa_v2.csv')texts = f_pessoa.copy()# Removing all pseudonyms that wrote in English.texts = texts[~texts['author'].isin(['Alexander Search', 'David Merrick', 'Charles Robert Anon', 'I. I. Crosse'])]texts['text'] = texts['text'].apply(lambda t: ast.literal_eval(t))texts = texts.reset_index().drop('index', axis=1)texts = texts['text'].tolist()texts = np.concatenate(texts)texts = np.asarray(texts)texts_p = " ".join(texts)# we will be truncating large texts soon, so this code only tries to reduce the # sequence size by splitting the texts that seem to be significantly larger than # the rest. Otherwise, we try to use the structure provided in the data itself_, ax = plt.subplots(1, 2, figsize=(15, 5))mylen = np.vectorize(len)sns.histplot(mylen(texts), bins=50, ax=ax[0])ax[0].set_title('Histogram of the number of characters in each \nchunk of text BEFORE splitting sentences', fontsize=16)large_texts = texts[mylen(texts)>350]large_texts_p = " ".join(large_texts)large_texts = tokenize.sent_tokenize(large_texts_p)texts = np.concatenate((texts[~(mylen(texts)>350)], large_texts))ax[1].set_title('Histogram of the number of characters in each \nchunk of text AFTER splitting sentences', fontsize=16)sns.histplot(mylen(texts), bins=50, ax=ax[1]);print(f'Length of texts dataset: {len(texts_p)} characters')Length of texts dataset: 5811145 characters
After cleaning up the texts, we end up with more than 5.8M of characters. Notice that to avoid losing data when normalizing the text length of our sequences, we split the largest sequences by sentence. The difference in the distribution of the sequence length can be seen in the histograms above. We can preview some of the sequences.
print(texts[97:106])['O burburinho da aΜgua' 'O burburinho da aΜgua' 'No regato que se espalha' 'EΜ como a ilusaΜo que eΜ maΜgoa' 'Quando a verdade a baralha.' 'β A uΜnica vantagem de estudar eΜ gozar o quanto os outros naΜo disseram.' 'β A arte eΜ um isolamento. Todo o artista deve buscar isolar os outros, levar-lhes aΜs almas o desejo de estarem soΜs. O triunfo supremo de um artista eΜ quando a ler suas obras o leitor prefere teΜ-las e naΜo as ler. NaΜo eΜ porque isto acontecΜ§a aos consagrados; eΜ porque eΜ o maior tributo (...)' 'β Ser luΜcido eΜ estar indisposto consigo proΜprio. O legiΜtimo estado de espiΜrito com respeito a olhar para dentro de si proΜprio eΜ o estado (...) de quem olha nervos e indecisoΜes.' 'A uΜnica atitude intelectual digna de uma criatura superior eΜ a de uma calma e fria compaixaΜo por tudo quanto naΜo eΜ ele proΜprio. NaΜo que essa atitude tenha o miΜnimo cunho de justa e verdadeira; mas eΜ taΜo invejaΜvel que eΜ preciso teΜ-la.']
More importantly, we can assess the number of unique characters, which is our vocabulary size.
vocab = sorted(set(texts_p))print(f'{len(vocab)} unique characters in texts')156 unique characters in texts
Before training, we need to convert the strings to some numerical representation. We started by tokenizing the text with some important aspects in mind. We considered an unlimited number of tokens and created them at the character level. We did not filter any character and kept the original capitalization. We then use the tokenizer to map our texts to encoded sequences.
def create_character_tokenizer(list_of_strings): tokenizer = Tokenizer(filters=None, char_level=True, split=None, lower=False) tokenizer.fit_on_texts(list_of_strings) return tokenizertokenizer = create_character_tokenizer(texts)tokenizer_config = tokenizer.get_config()word_counts = json.loads(tokenizer_config['word_counts'])index_word = json.loads(tokenizer_config['index_word'])word_index = json.loads(tokenizer_config['word_index'])def strings_to_sequences(tokenizer, list_of_strings): sentence_seq = tokenizer.texts_to_sequences(list_of_strings) return sentence_seqseq_texts = strings_to_sequences(tokenizer, texts)
We can see an example of this encoding.
print('Original sequence: \n' + texts[0] + '\n')print('Encoded sequence: ')print(seq_texts[0])Original sequence: Diana atraveΜs dos ramosEncoded sequence: [46, 6, 3, 8, 3, 1, 3, 9, 7, 3, 19, 26, 5, 1, 10, 4, 5, 1, 7, 3, 11, 4, 5]
We also need to normalize the length of our sequences, for which we define a length of 300 characters. Sequences smaller than 300 are padded with zeros, while sequences bigger than 300 are truncated.
mylen = np.vectorize(len)print(max(mylen(texts)))print(np.round(np.mean(mylen(texts))))137771.0def make_padded_dataset(sequences): padded_sequence = tf.keras.preprocessing.sequence.pad_sequences(sequences, maxlen=300, padding='pre', truncating='pre', value=0) return padded_sequencepadded_sequences = make_padded_dataset(seq_texts)
The RNN works by receiving a sequence of characters and predicting the next character in the sequence. At training time, the model receives an input sequence and a target sequence, which is shifted by one.
For example, the expression Diana atraveΜs dos ramos is the first verse of the first poem on our dataset. The poem is from Ricardo Reis, one of the many heteronyms of F. Pessoa. Given the input, Diana atraveΜs dos ramo the correct prediction is iana atraveΜs dos ramos. Notice that the prediction is the same length as the input.
Another decision we took was to build our RNN to be stateful, which means that its internal state is maintained across batches. To be effective, we need to make sure that each batch element follows on from the corresponding element of the preceding batch.
def create_inputs_and_targets(array_of_sequences, batch_size=32): input_seq = array_of_sequences[:,:-1] target_seq = array_of_sequences[:,1:] # Prepare the batches and ensure that is ready to be fed to a stateful RNN num_examples = input_seq.shape[0] num_processed_examples = num_examples - (num_examples % batch_size) input_seq = input_seq[:num_processed_examples] target_seq = target_seq[:num_processed_examples] steps = int(num_processed_examples / 32) inx = np.empty((0,), dtype=np.int32) for i in range(steps): inx = np.concatenate((inx, i + np.arange(0, num_processed_examples, steps))) input_seq_stateful = input_seq[inx] target_seq_stateful = target_seq[inx] # Split data between training and validation sets num_train_examples = int(batch_size * ((0.8 * num_processed_examples) // batch_size)) input_train = input_seq_stateful[:num_train_examples] target_train = target_seq_stateful[:num_train_examples] input_valid = input_seq_stateful[num_train_examples:] target_valid = target_seq_stateful[num_train_examples:] # Create datasets objects for training and validation data dataset_train = tf.data.Dataset.from_tensor_slices((input_train, target_train)) dataset_train = dataset_train.batch(batch_size, drop_remainder=True) dataset_valid = tf.data.Dataset.from_tensor_slices((input_valid, target_valid)) dataset_valid = dataset_valid.batch(batch_size, drop_remainder=True) return (dataset_train, dataset_valid) train_data, valid_data = create_inputs_and_targets(padded_sequences)
We started by defining an embedding layer that turns our indexes of characters into dense vectors of fixed size. It is important to note that padded values are masked in this layer, which means they are simply ignored. Next, we stacked 2 unidirectional stateful LSTM layers, each with 512 units. These layers have the potential to learn long-term dependencies; however, they are computationally expensive to train. In between them, we introduced a dropout layer. Finally, the last layer outputs one logit for each character in the vocabulary. These are the log-likelihood of each character according to the model. Notice that we get a total of about 4M parameters to train.
def get_model(vocab_size, batch_size): model = tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=vocab_size, output_dim = 256, mask_zero=True, batch_input_shape=(batch_size, None)), tf.keras.layers.LSTM(units=512, return_sequences=True,stateful=True), tf.keras.layers.Dropout(0.2), tf.keras.layers.LSTM(units=512, return_sequences=True,stateful=True), tf.keras.layers.Dense(units=vocab_size) ]) return modelbatch_size=32model = get_model(len(tokenizer.word_index) + 1, batch_size)model.summary()Model: "sequential"_________________________________________________________________Layer (type) Output Shape Param # =================================================================embedding (Embedding) (32, None, 256) 40192 _________________________________________________________________lstm (LSTM) (32, None, 512) 1574912 _________________________________________________________________dropout (Dropout) (32, None, 512) 0 _________________________________________________________________lstm_1 (LSTM) (32, None, 512) 2099200 _________________________________________________________________dense (Dense) (32, None, 157) 80541 =================================================================Total params: 3,794,845Trainable params: 3,794,845Non-trainable params: 0_________________________________________________________________checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(filepath='./models/ckpt', save_weights_only=True, save_best_only=True)model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['sparse_categorical_accuracy'])history = model.fit(train_data, epochs=30, validation_data=valid_data, callbacks=[checkpoint_callback, tf.keras.callbacks.EarlyStopping(patience=2)])Epoch 1/302023/2023 [==============================] - 1041s 512ms/step - loss: 0.5216 - sparse_categorical_accuracy: 0.3516 - val_loss: 0.3298 - val_sparse_categorical_accuracy: 0.5669[...]Epoch 18/302023/2023 [==============================] 
- 1031s 510ms/step - loss: 0.2495 - sparse_categorical_accuracy: 0.6478 - val_loss: 0.2756 - val_sparse_categorical_accuracy: 0.6268def model_history(history): history_dict = dict() for k, v in history.history.items(): history_dict[k] = [float(val) for val in history.history[k]] return history_dicthistory_dict = model_history(history)
The training is quite slow even using GPU (despite reducing the training time by a factor of 15 compared to CPU), and recall that we only stacked two LSTM layers with a limited number of units. From the figure below, we can see a rapid increase of the accuracy on both the training and validation datasets and then a steady climb for several epochs. Our callback is eventually executed (when there is no increase in the validation accuracy for more than 2 epochs) to stop the training process. There was no sign of overfitting.
def plot_history(history_dict): plt.figure(figsize=(15,5)) plt.subplot(121) plt.plot(history_dict['sparse_categorical_accuracy']) plt.plot(history_dict['val_sparse_categorical_accuracy']) plt.title('Accuracy vs. epochs') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.xticks(np.arange(len(history_dict['sparse_categorical_accuracy']))) ax = plt.gca() ax.set_xticklabels(1 + np.arange(len(history_dict['sparse_categorical_accuracy']))) plt.legend(['Training', 'Validation'], loc='lower right') plt.subplot(122) plt.plot(history_dict['loss']) plt.plot(history_dict['val_loss']) plt.title('Loss vs. epochs') plt.ylabel('Loss') plt.xlabel('Epoch') plt.xticks(np.arange(len(history_dict['sparse_categorical_accuracy']))) ax = plt.gca() ax.set_xticklabels(1 + np.arange(len(history_dict['sparse_categorical_accuracy']))) plt.legend(['Training', 'Validation'], loc='upper right') plt.show() plot_history(history_dict)
model = get_model(len(tokenizer.word_index) + 1, batch_size=1)model.load_weights(tf.train.latest_checkpoint('./models/')).expect_partial()def get_logits(model, token_sequence, initial_state1=None, initial_state2=None, initial_state3=None): token_sequence = np.asarray(token_sequence) if initial_state1 is not None: # set states for all recurrent layers model.layers[1].states = initial_state1 model.layers[3].states = initial_state2 model.layers[5].states = initial_state3 else: model.layers[1].reset_states() model.layers[3].reset_states() model.layers[5].reset_states() logit = model.predict(token_sequence) logit = logit[:,-1,:] return logitdef sample_token(logits): pred = tf.random.categorical(logits, num_samples=1).numpy()[0] return pred[0]
To generate text from our model, we need to specify a seed string to get the network started. Next, we tokenize the initial string and reset the state of the network. The string is then converted to a tensor with a batch size of 1 to be fed to our model. We used the prediction from the last time step to build a categorical distribution and sample from it afterward. Using the same state of our network and the previously sampled token, we can repeat the prediction step until we get the final sequence with the specified size.
The resulting original text is quite interesting to analyze. Remember that our RNN had to learn the Portuguese language from scratch with a fairly small dataset. No explicit information such as syntax or semantics is provided to the model other than practical examples of writings in Portuguese. The dataset is also fairly small for the task. Nevertheless, there are interesting learnings to take notice of. For example, in terms of punctuation, the quotation marks are used correctly, showing the understanding that they are required to open and close. In sentences such as βDesassossego naΜo poderia!... FaleΜncias no meu coracΜ§aΜo...β or βAs cancΜ§oΜes... eΜ um sono de ouvir... FiccΜ§aΜo tanto!...β we can almost grasp some of the restlessness of Fernando Pessoa. On the other hand, we see that the meaning or intention is not something that an RNN can capture, and we can also identify some orthographic errors.
init_string = 'Desassossego'num_generation_steps = 500token_sequence = tokenizer.texts_to_sequences([init_string])initial_state_1, initial_state_2, initial_state_3 = None, None, Noneinput_sequence = token_sequencefor _ in range(num_generation_steps): logits = get_logits(model, input_sequence, initial_state1=initial_state_1, initial_state2=initial_state_2, initial_state3=initial_state_3) sampled_token = sample_token(logits) token_sequence[0].append(sampled_token) input_sequence = [[sampled_token]] initial_state_1 = model.layers[1].states initial_state_2 = model.layers[3].states initial_state_2 = model.layers[5].states print(tokenizer.sequences_to_texts(token_sequence)[0][::2])Desassossego naΜo poderia!... FaleΜncias no meu coracΜ§aΜo... Esse reer sobre os bracΜ§os dos meus caminhos e ignorantes possamos Β«exensacΜ§aΜo simboΜlicaΒ» e em Natureza, e a noite nova da auseΜncia de cada? NaΜo pense de bem entendida uma orientada prosa). V. como fui... As cancΜ§oΜes... eΜ um sono de ouvir... FiccΜ§aΜo tanto!... Vejo outro olhar pela Tristeza da cadeira, rainha para a Carta, a noite. Depois no paganismo que se sente no espacΜ§o real e de criar uma pedra de tradicΜ§aΜo socioloΜgica para implicar o de Aristoclator S
For this task, the preprocessing of the data is challenging. We need to ensure that we have our input sequence encoded in a suitable way for the RNN to capture the available semantic representation effectively. RNNs are computationally expensive to train, so we decided to keep the structure as simple as possible.
We were able to generate text in Portuguese without providing any structural information about the language to the model other than the writings of a poet. The model learned some of the fundamental structure of the language while preserving nuances that we can consider similar to the training corpus.
This approach can be extended by increasing the depth of the model with more recurrent layers and the number of units in each layer. Hyperparameters such as the batch size can also be tuned to increase accuracy. We tested the possibility of separating the dataset by form of writing, training one DNN with texts in prose and another with texts in poetry. The results were not satisfactory, as the DNNs failed to generate text with a coherent structure. We leave it as future work.
[1] β [De Boom et al., 2018] De Boom, C., Demeester, T., and Dhoedt, B. (2018). Character-level recurrent neural networks in practice: comparing training and sampling schemes. Neural Computing and Applications, 31(8):4001β4017.
[2] β [Sutskever et al., 2011] Sutskever, I., Martens, J., and Hinton, G. (2011). Generating text with recurrent neural networks. ICMLβ11, page 1017β1024, Madison, WI, USA. Omnipress.
[3] β https://www.kaggle.com/luisroque/the-complete-literary-works-of-fernando-pessoa
|
[
{
"code": null,
"e": 630,
"s": 172,
"text": "Sequences of discrete tokens can be found in many applications, namely words in a text, notes in a musical composition, pixels in an image, actions in a reinforcement learning agent, etc [1]. These sequences often show a strong correlation between consecutive or nearby tokens. The correlations on words in a sentence or characters in words express the underlying semantics and language characteristics. The next token in the sequence x_n can be modeled as:"
},
{
"code": null,
"e": 1072,
"s": 630,
"text": "where x_i represents the ith token in the sequence. In Natural Language Processing (NLP), these are defined as language models. Usually, each token stands for a separate word or n-gram. The output generated is a probability distribution from which we can sample to generate the next token in the sequence. These models are also known as recurrent, as we can apply this generative process recurrently to create entire new sequences of tokens."
},
{
"code": null,
"e": 1769,
"s": 1072,
"text": "One particular type of generative model often used to tackle problems with sequences of discrete tokens is Recurrent Neural Networks (RNN). In a simpler neural network, a fixed-dimensional feature representation is transformed several times by different non-linear functions. In an RNN, these transformations are also repeated in time, which means that at every time step, a new input is processed, and a new output is generated. They can effectively capture semantically rich representations of the input sequences [2]. RNN showed this capacity in different settings, such as generating structured text, original images (on a per pixels basis), or even modeling user behavior on online services."
},
{
"code": null,
"e": 2615,
"s": 1769,
"text": "Our task is to generate original text that resembles a training corpus. It is an unsupervised task, as we do not have access to any labeling or target variable. We start by creating a word embedding that maps each character to a vector with a parameterized dimension. For each character, the model looks up the embedding and feeds the result to a stack of Long Short-Term Memory (LSTM) layers, a specific type of RNN. These were developed to extend the traditional capacity of RNNs to model long-term dependencies and counter the vanishing gradient problem. The output of our network is a dense layer with a number of units equal to the vocabulary size. We did not define an activation function for this layer; it simply outputs one logit for each character in the vocabulary. We use these values to later sample from a categorical distribution."
},
{
"code": null,
"e": 2910,
"s": 2615,
"text": "In this article, we use the work of Fernando Pessoa, one of the most significant literary figures of the 20th century and one of the greatest poets in the Portuguese language. This dataset is now publicly available on Kaggle and consists of more than 4300 poems, essays, and other writings [3]."
},
{
"code": null,
"e": 2959,
"s": 2910,
"text": "The code is also available on Kaggle and GitHub."
},
{
"code": null,
"e": 3039,
"s": 2959,
"text": "This article belongs to a series of articles on Deep Learning using TensorFlow:"
},
{
"code": null,
"e": 3117,
"s": 3039,
"text": "Transfer Learning and Data Augmentation applied to the Simpsons Image Dataset"
},
{
"code": null,
"e": 3195,
"s": 3117,
"text": "Generating Text With Recurrent Neural Networks based on the Work of F. Pessoa"
},
{
"code": null,
"e": 3278,
"s": 3195,
"text": "Neural Machine Translation using a Seq2Seq Architecture and Attention (ENG to POR)"
},
{
"code": null,
"e": 3336,
"s": 3278,
"text": "Residual Networks from Scratch Applied to Computer Vision"
},
{
"code": null,
"e": 3688,
"s": 3336,
"text": "The dataset comprises several texts written by the author under his own name but also using different heteronyms and pseudonyms. Each one has his own style of writing, which could be interesting to learn separately. Nevertheless, to efficiently train Deep Neural Networks (DNN), we need a large dataset and that was the reason to build a single model."
},
{
"code": null,
"e": 3928,
"s": 3688,
"text": "F. Pessoa lived part of his youth in South Africa, where he was exposed to the English language. That is why part of his work is written in English. To avoid introducing noise, we remove most of the English texts from the training dataset."
},
{
"code": null,
"e": 5548,
"s": 3928,
"text": "import pandas as pdimport numpy as npfrom tensorflow.keras.preprocessing.text import Tokenizerimport tensorflow as tfimport astimport osimport jsonimport matplotlib.pyplot as pltfrom nltk import tokenizeimport seaborn as snsf_pessoa = pd.read_csv(os.getcwd() + '/f_pessoa_v2.csv')texts = f_pessoa.copy()# Removing all pseudonyms that wrote in English.texts = texts[~texts['author'].isin(['Alexander Search', 'David Merrick', 'Charles Robert Anon', 'I. I. Crosse'])]texts['text'] = texts['text'].apply(lambda t: ast.literal_eval(t))texts = texts.reset_index().drop('index', axis=1)texts = texts['text'].tolist()texts = np.concatenate(texts)texts = np.asarray(texts)texts_p = \" \".join(texts)# we will be truncating large texts soon, so this code only tries to reduce the # sequence size by splitting the texts that seem to be significantly larger than # the rest. Otherwise, we try to use the structure provided in the data itself_, ax = plt.subplots(1, 2, figsize=(15, 5))mylen = np.vectorize(len)sns.histplot(mylen(texts), bins=50, ax=ax[0])ax[0].set_title('Histogram of the number of characters in each \\nchunk of text BEFORE splitting sentences', fontsize=16)large_texts = texts[mylen(texts)>350]large_texts_p = \" \".join(large_texts)large_texts = tokenize.sent_tokenize(large_texts_p)texts = np.concatenate((texts[~(mylen(texts)>350)], large_texts))ax[1].set_title('Histogram of the number of characters in each \\nchunk of text AFTER splitting sentences', fontsize=16)sns.histplot(mylen(texts), bins=50, ax=ax[1]);print(f'Length of texts dataset: {len(texts_p)} characters')Length of texts dataset: 5811145 characters"
},
{
"code": null,
"e": 5883,
"s": 5548,
"text": "After cleaning up the texts, we end up with more than 5.8M of characters. Notice that to avoid losing data when normalizing the text length of our sequences, we split the largest sequences by sentence. The difference in the distribution of the sequence length can be seen in the histograms above. We can preview some of the sequences."
},
{
"code": null,
"e": 6858,
"s": 5883,
"text": "print(texts[97:106])['O burburinho da aΜgua' 'O burburinho da aΜgua' 'No regato que se espalha' 'EΜ como a ilusaΜo que eΜ maΜgoa' 'Quando a verdade a baralha.' 'β A uΜnica vantagem de estudar eΜ gozar o quanto os outros naΜo disseram.' 'β A arte eΜ um isolamento. Todo o artista deve buscar isolar os outros, levar-lhes aΜs almas o desejo de estarem soΜs. O triunfo supremo de um artista eΜ quando a ler suas obras o leitor prefere teΜ-las e naΜo as ler. NaΜo eΜ porque isto acontecΜ§a aos consagrados; eΜ porque eΜ o maior tributo (...)' 'β Ser luΜcido eΜ estar indisposto consigo proΜprio. O legiΜtimo estado de espiΜrito com respeito a olhar para dentro de si proΜprio eΜ o estado (...) de quem olha nervos e indecisoΜes.' 'A uΜnica atitude intelectual digna de uma criatura superior eΜ a de uma calma e fria compaixaΜo por tudo quanto naΜo eΜ ele proΜprio. NaΜo que essa atitude tenha o miΜnimo cunho de justa e verdadeira; mas eΜ taΜo invejaΜvel que eΜ preciso teΜ-la.']"
},
{
"code": null,
"e": 6953,
"s": 6858,
"text": "More importantly, we can assess the number of unique characters, which is our vocabulary size."
},
{
"code": null,
"e": 7061,
"s": 6953,
"text": "vocab = sorted(set(texts_p))print(f'{len(vocab)} unique characters in texts')156 unique characters in texts"
},
{
"code": null,
"e": 7434,
"s": 7061,
"text": "Before training, we need to convert the strings to some numerical representation. We started by tokenizing the text with some important aspects in mind. We considered an unlimited number of tokens and created them at the character level. We did not filter any character and kept the original capitalization. We then use the tokenizer to map our texts to encoded sequences."
},
{
"code": null,
"e": 8144,
"s": 7434,
"text": "def create_character_tokenizer(list_of_strings): tokenizer = Tokenizer(filters=None, char_level=True, split=None, lower=False) tokenizer.fit_on_texts(list_of_strings) return tokenizertokenizer = create_character_tokenizer(texts)tokenizer_config = tokenizer.get_config()word_counts = json.loads(tokenizer_config['word_counts'])index_word = json.loads(tokenizer_config['index_word'])word_index = json.loads(tokenizer_config['word_index'])def strings_to_sequences(tokenizer, list_of_strings): sentence_seq = tokenizer.texts_to_sequences(list_of_strings) return sentence_seqseq_texts = strings_to_sequences(tokenizer, texts)"
},
{
"code": null,
"e": 8184,
"s": 8144,
"text": "We can see an example of this encoding."
},
{
"code": null,
"e": 8414,
"s": 8184,
"text": "print('Original sequence: \\n' + texts[0] + '\\n')print('Encoded sequence: ')print(seq_texts[0])Original sequence: Diana atraveΜs dos ramosEncoded sequence: [46, 6, 3, 8, 3, 1, 3, 9, 7, 3, 19, 26, 5, 1, 10, 4, 5, 1, 7, 3, 11, 4, 5]"
},
{
"code": null,
"e": 8614,
"s": 8414,
"text": "We also need to normalize the length of our sequences, for which we define a length of 300 characters. Sequences smaller than 300 are padded with zeros, while sequences bigger than 300 are truncated."
},
{
"code": null,
"e": 9144,
"s": 8614,
"text": "mylen = np.vectorize(len)print(max(mylen(texts)))print(np.round(np.mean(mylen(texts))))137771.0def make_padded_dataset(sequences): padded_sequence = tf.keras.preprocessing.sequence.pad_sequences(sequences, maxlen=300, padding='pre', truncating='pre', value=0) return padded_sequencepadded_sequences = make_padded_dataset(seq_texts)"
},
{
"code": null,
"e": 9350,
"s": 9144,
"text": "The RNN works by receiving a sequence of characters and predicting the next character in the sequence. At training time, the model receives an input sequence and a target sequence, which is shifted by one."
},
{
"code": null,
"e": 9680,
"s": 9350,
"text": "For example, the expression Diana atraveΜs dos ramos is the first verse of the first poem on our dataset. The poem is from Ricardo Reis, one of the many heteronyms of F. Pessoa. Given the input, Diana atraveΜs dos ramo the correct prediction is iana atraveΜs dos ramos. Notice that the prediction is the same length as the input."
},
{
"code": null,
"e": 9936,
"s": 9680,
"text": "Another decision we took was to build our RNN to be stateful, which means that its internal state is maintained across batches. To be effective, we need to make sure that each batch element follows on from the corresponding element of the preceding batch."
},
{
"code": null,
"e": 11534,
"s": 9936,
"text": "def create_inputs_and_targets(array_of_sequences, batch_size=32): input_seq = array_of_sequences[:,:-1] target_seq = array_of_sequences[:,1:] # Prepare the batches and ensure that is ready to be fed to a stateful RNN num_examples = input_seq.shape[0] num_processed_examples = num_examples - (num_examples % batch_size) input_seq = input_seq[:num_processed_examples] target_seq = target_seq[:num_processed_examples] steps = int(num_processed_examples / 32) inx = np.empty((0,), dtype=np.int32) for i in range(steps): inx = np.concatenate((inx, i + np.arange(0, num_processed_examples, steps))) input_seq_stateful = input_seq[inx] target_seq_stateful = target_seq[inx] # Split data between training and validation sets num_train_examples = int(batch_size * ((0.8 * num_processed_examples) // batch_size)) input_train = input_seq_stateful[:num_train_examples] target_train = target_seq_stateful[:num_train_examples] input_valid = input_seq_stateful[num_train_examples:] target_valid = target_seq_stateful[num_train_examples:] # Create datasets objects for training and validation data dataset_train = tf.data.Dataset.from_tensor_slices((input_train, target_train)) dataset_train = dataset_train.batch(batch_size, drop_remainder=True) dataset_valid = tf.data.Dataset.from_tensor_slices((input_valid, target_valid)) dataset_valid = dataset_valid.batch(batch_size, drop_remainder=True) return (dataset_train, dataset_valid) train_data, valid_data = create_inputs_and_targets(padded_sequences)"
},
{
"code": null,
"e": 12208,
"s": 11534,
"text": "We started by defining an embedding layer that turns our indexes of characters into dense vectors of fixed size. It is important to note that padded values are masked in this layer, which means they are simply ignored. Next, we stacked 2 unidirectional stateful LSTM layers, each with 512 units. These layers have the potential to learn long-term dependencies; however, they are computationally expensive to train. In between them, we introduced a dropout layer. Finally, the last layer outputs one logit for each character in the vocabulary. These are the log-likelihood of each character according to the model. Notice that we get a total of about 4M parameters to train."
},
{
"code": null,
"e": 14950,
"s": 12208,
"text": "def get_model(vocab_size, batch_size): model = tf.keras.Sequential([ tf.keras.layers.Embedding(input_dim=vocab_size, output_dim = 256, mask_zero=True, batch_input_shape=(batch_size, None)), tf.keras.layers.LSTM(units=512, return_sequences=True,stateful=True), tf.keras.layers.Dropout(0.2), tf.keras.layers.LSTM(units=512, return_sequences=True,stateful=True), tf.keras.layers.Dense(units=vocab_size) ]) return modelbatch_size=32model = get_model(len(tokenizer.word_index) + 1, batch_size)model.summary()Model: \"sequential\"_________________________________________________________________Layer (type) Output Shape Param # =================================================================embedding (Embedding) (32, None, 256) 40192 _________________________________________________________________lstm (LSTM) (32, None, 512) 1574912 _________________________________________________________________dropout (Dropout) (32, None, 512) 0 _________________________________________________________________lstm_1 (LSTM) (32, None, 512) 2099200 _________________________________________________________________dense (Dense) (32, None, 157) 80541 =================================================================Total params: 3,794,845Trainable params: 3,794,845Non-trainable params: 0_________________________________________________________________checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(filepath='./models/ckpt', save_weights_only=True, save_best_only=True)model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['sparse_categorical_accuracy'])history = model.fit(train_data, epochs=30, validation_data=valid_data, callbacks=[checkpoint_callback, tf.keras.callbacks.EarlyStopping(patience=2)])Epoch 1/302023/2023 [==============================] - 1041s 512ms/step - loss: 0.5216 - sparse_categorical_accuracy: 0.3516 - val_loss: 0.3298 - val_sparse_categorical_accuracy: 0.5669[...]Epoch 18/302023/2023 
[==============================] - 1031s 510ms/step - loss: 0.2495 - sparse_categorical_accuracy: 0.6478 - val_loss: 0.2756 - val_sparse_categorical_accuracy: 0.6268def model_history(history): history_dict = dict() for k, v in history.history.items(): history_dict[k] = [float(val) for val in history.history[k]] return history_dicthistory_dict = model_history(history)"
},
{
"code": null,
"e": 15478,
"s": 14950,
"text": "The training is quite slow even using GPU (despite reducing the training time by a factor of 15 compared to CPU), and recall that we only stacked two LSTM layers with a limited number of units. From the figure below, we can see a rapid increase of the accuracy on both the training and validation datasets and then a steady climb for several epochs. Our callback is eventually executed (when there is no increase in the validation accuracy for more than 2 epochs) to stop the training process. There was no sign of overfitting."
},
{
"code": null,
"e": 16463,
"s": 15478,
"text": "def plot_history(history_dict): plt.figure(figsize=(15,5)) plt.subplot(121) plt.plot(history_dict['sparse_categorical_accuracy']) plt.plot(history_dict['val_sparse_categorical_accuracy']) plt.title('Accuracy vs. epochs') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.xticks(np.arange(len(history_dict['sparse_categorical_accuracy']))) ax = plt.gca() ax.set_xticklabels(1 + np.arange(len(history_dict['sparse_categorical_accuracy']))) plt.legend(['Training', 'Validation'], loc='lower right') plt.subplot(122) plt.plot(history_dict['loss']) plt.plot(history_dict['val_loss']) plt.title('Loss vs. epochs') plt.ylabel('Loss') plt.xlabel('Epoch') plt.xticks(np.arange(len(history_dict['sparse_categorical_accuracy']))) ax = plt.gca() ax.set_xticklabels(1 + np.arange(len(history_dict['sparse_categorical_accuracy']))) plt.legend(['Training', 'Validation'], loc='upper right') plt.show() plot_history(history_dict)"
},
{
"code": null,
"e": 17288,
"s": 16463,
"text": "model = get_model(len(tokenizer.word_index) + 1, batch_size=1)model.load_weights(tf.train.latest_checkpoint('./models/')).expect_partial()def get_logits(model, token_sequence, initial_state1=None, initial_state2=None, initial_state3=None): token_sequence = np.asarray(token_sequence) if initial_state1 is not None: # set states for all recurrent layers model.layers[1].states = initial_state1 model.layers[3].states = initial_state2 model.layers[5].states = initial_state3 else: model.layers[1].reset_states() model.layers[3].reset_states() model.layers[5].reset_states() logit = model.predict(token_sequence) logit = logit[:,-1,:] return logitdef sample_token(logits): pred = tf.random.categorical(logits, num_samples=1).numpy()[0] return pred[0]"
},
{
"code": null,
"e": 17817,
"s": 17288,
"text": "To generate text from our model, we need to specify a seed string to get the network started. Next, we tokenize the initial string and reset the state of the network. The string is then converted to a tensor with a batch size of 1 to be fed to our model. We used the prediction from the last time step to build a categorical distribution and sample from it afterward. Using the same state of our network and the previously sampled token, we can repeat the prediction step until we get the final sequence with the specified size."
},
{
"code": null,
"e": 18732,
"s": 17817,
"text": "The resulting original text is quite interesting to analyze. Remember that our RNN had to learn the Portuguese language from scratch with a fairly small dataset. No explicit information such as syntax or semantics is provided to the model other than practical examples on writings in Portuguese. The dataset is also fairly small for the task. Nevertheless, there are interesting learnings to take notice of. For example, in terms of punctuation, the quotation marks are used correctly, showing the understanding that they are required to open and close. In sentences such as βDesassossego naΜo poderia!... FaleΜncias no meu coracΜ§aΜo...β or βAs cancΜ§oΜes... eΜ um sono de ouvir... FiccΜ§aΜo tanto!...β we can almost grasp some of the rentlessness of Fernando Pessoa. On the other hand, we see that the meaning or intention is not something that an RNN can capture, and we can also identify some orthographic errors."
},
{
"code": null,
"e": 20066,
"s": 18732,
"text": "init_string = 'Desassossego'num_generation_steps = 500token_sequence = tokenizer.texts_to_sequences([init_string])initial_state_1, initial_state_2, initial_state_3 = None, None, Noneinput_sequence = token_sequencefor _ in range(num_generation_steps): logits = get_logits(model, input_sequence, initial_state1=initial_state_1, initial_state2=initial_state_2, initial_state3=initial_state_3) sampled_token = sample_token(logits) token_sequence[0].append(sampled_token) input_sequence = [[sampled_token]] initial_state_1 = model.layers[1].states initial_state_2 = model.layers[3].states initial_state_2 = model.layers[5].states print(tokenizer.sequences_to_texts(token_sequence)[0][::2])Desassossego naΜo poderia!... FaleΜncias no meu coracΜ§aΜo... Esse reer sobre os bracΜ§os dos meus caminhos e ignorantes possamos Β«exensacΜ§aΜo simboΜlicaΒ» e em Natureza, e a noite nova da auseΜncia de cada? NaΜo pense de bem entendida uma orientada prosa). V. como fui... As cancΜ§oΜes... eΜ um sono de ouvir... FiccΜ§aΜo tanto!... Vejo outro olhar pela Tristeza da cadeira, rainha para a Carta, a noite. Depois no paganismo que se sente no espacΜ§o real e de criar uma pedra de tradicΜ§aΜo socioloΜgica para implicar o de Aristoclator S"
},
{
"code": null,
"e": 20381,
"s": 20066,
"text": "For this task, the preprocessing of the data is challenging. We need to ensure that we have our input sequence encoded in a suitable way for the RNN to capture the available semantic representation effectively. RNNs are computationally expensive to train, so we decided to keep the structure as simple as possible."
},
{
"code": null,
"e": 20681,
"s": 20381,
"text": "We were able to generate text in Portuguese without proving any structural information about the language to the model other than the writings of a poet. The model learned some of the fundamental structure of the language while preserving nuances that we can consider similar to the training corpus."
},
{
"code": null,
"e": 21150,
"s": 20681,
"text": "This approach can be extended by increasing the depth of the model with more recurrent layers and the number of units in each layer. Hyperparameters such as the batch size can also be tuned to increase accuracy. We tested the possibility to separate by the form of writing, training one DNN with texts in prose and another with texts in poetry. The results were not satisfying, as the DNNs failed to generate text with a coherent structure. We leave it as future work."
},
{
"code": null,
"e": 21378,
"s": 21150,
"text": "[1] β [De Boom et al., 2018] De Boom, C., Demeester, T., and Dhoedt, B. (2018). Character-level recur-rent neural networks in practice: comparing training and sampling schemes.Neural Computing and Applications, 31(8):4001β4017."
},
{
"code": null,
"e": 21562,
"s": 21378,
"text": "[2] β [Sutskever et al., 2011] Sutskever, I., Martens, J., and Hinton, G. (2011). Generating text with recurrent neural networks. ICMLβ11, page 1017β1024, Madison, WI, USA. Omnipress."
}
] |
Create a Simple Color Picker using JavaScript - GeeksforGeeks
|
05 Jan, 2022
It is quite easy to develop such a client-side application. The primary colors as we know are Red(R), Green(G), Blue(B) and by mixing them we can form any color that we want.
In this article, we will learn to get the RGB value from the user and use CSS to form the color using RGB(red, green, blue) property.
Prerequisite: Basic knowledge of some front-end technologies like HTML, CSS, and JavaScript is required.
HTML code:
HTML
<!DOCTYPE html><html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content= "width=device-width, initial-scale=1.0"> <link rel="stylesheet" href="style.css"> <link href="https://fonts.googleapis.com/css2?family=Itim&display=swap" rel="stylesheet"></head> <body> <div class="neumorphism-3"></div> <div class="inpt"> <input type="number" id="red"> <input type="number" id="green"> <input type="number" id="blue"> </div> <h1 class="rainbow-text">--RGB-TO-COLOR--</h1> <script src="script.js"></script></body> </html>
CSS code: The CSS contains some additional lines for a cool hover effect. The following code is the content of the "style.css" file used in the above HTML code.
Filename: style.css
CSS
* { padding: 0; margin: 0; box-sizing: border-box;} body { background: #f7f7f7; padding-top: 14%;} .neumorphism-3 { width: 300px; height: 300px; border-radius: 50%; box-shadow: -3px -3px 7px #e9e9e9a9, 3px 3px 7px #e9e9e9a9; position: absolute; top: 40px; left: 490px;} .neumorphism-3:hover { top: 30px; box-shadow: -3px -3px 7px #999999a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #999999a9, -3px -3px 12px #e9e9e9a9; animation: uplift 0.1s 1 linear;} .neumorphism-3:not( :hover) { animation: downlift 0.1s 1 linear; top: 40px;} @keyframes uplift { 0% { top: 40px; } 25% { top: 37.5px; } 50% { top: 35px; } 75% { top: 32.5px; } 100% { top: 30px; }} @keyframes downlift { 0% { box-shadow: -3px -3px 7px #999999a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #999999a9, -3px -3px 12px #e9e9e9a9; top: 30px; } 25% { box-shadow: -3px -3px 7px #b3b3b3a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #b3b3b3a9, -3px -3px 12px #e9e9e9a9; top: 32.5px; } 50% { top: 35px; box-shadow: -3px -3px 7px #d6d6d6a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #d6d6d6a9, -3px -3px 12px #e9e9e9a9; } 75% { top: 37.5px; box-shadow: -3px -3px 7px #f3f3f3a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #f3f3f3a9, -3px -3px 12px #e9e9e9a9; } 100% { box-shadow: -3px -3px 7px #e9e9e9a9, 3px 3px 7px #e9e9e9a9; top: 40px; }} div.input { position: absolute; top: 450px; left: 550px;} div.input input { height: 30px; width: 60px; font-size: 30px; color: seashell; text-align: center; opacity: 0.7; border: none; border-radius: 4px;} #red { background-color: red;} #green { background-color: green;} #blue { background-color: blue;} /* Chrome, Safari, Edge */ input::-webkit-outer-spin-button,input::-webkit-inner-spin-button { -webkit-appearance: none; margin: 0;} /* Firefox */input[type=number] { -moz-appearance: textfield;} .rainbow-text { background-image: linear-gradient ( to left, violet, indigo, blue, green, yellow, orange, red); width: 300px; height: 50px; -webkit-background-clip: text; color: transparent; font-family: "Itim"; 
text-align: center; position: relative; top: 340px; left: 500px;}
JavaScript code: The following is the JavaScript code βscript.jsβ used in the above HTML code.
Filename: script.js
Javascript
let red = document.getElementById('red');let green = document.getElementById('green');let blue = document.getElementById('blue');let box = document.querySelector('div.neumorphism-3'); let r = 0, g = 0, b = 0; red.addEventListener("keyup", function (event) { r = red.value; if (!r) r = 0; box.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;}); green.addEventListener("keyup", function (event) { g = green.value; if (!g) g = 0; box.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;}); blue.addEventListener("keyup", function (event) { b = blue.value; if (!b) b = 0; box.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;});
Output: After running the file, you can choose specific values for R, G, B and get your desired colors.
RGB
anikakapoor
sagar0719kumar
CSS-Properties
CSS-Questions
HTML-Questions
JavaScript-Methods
JavaScript-Questions
CSS
HTML
JavaScript
Web Technologies
HTML
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Design a web page using HTML and CSS
Form validation using jQuery
How to set space between the flexbox ?
Search Bar using HTML, CSS and JavaScript
How to Create Time-Table schedule using HTML ?
How to set the default value for an HTML <select> element ?
How to set input type date in dd-mm-yyyy format using HTML ?
Hide or show elements in HTML using display property
How to Insert Form Data into Database using PHP ?
REST API (Introduction)
|
[
{
"code": null,
"e": 25376,
"s": 25348,
"text": "\n05 Jan, 2022"
},
{
"code": null,
"e": 25552,
"s": 25376,
"text": "It is quite easy to develop such a client-side application. The primary colors as we know are Red(R), Green(G), Blue(B) and by mixing them we can form any color that we want. "
},
{
"code": null,
"e": 25686,
"s": 25552,
"text": "In this article, we will learn to get the RGB value from the user and use CSS to form the color using RGB(red, green, blue) property."
},
{
"code": null,
"e": 25790,
"s": 25686,
"text": "Prerequisite: Basic knowledge of some front-end technologies like HTML, CSS, JavaScript are required. "
},
{
"code": null,
"e": 25801,
"s": 25790,
"text": "HTML code:"
},
{
"code": null,
"e": 25806,
"s": 25801,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\"> <meta name=\"viewport\" content= \"width=device-width, initial-scale=1.0\"> <link rel=\"stylesheet\" href=\"style.css\"> <link href=\"https://fonts.googleapis.com/css2?family=Itim&display=swap\" rel=\"stylesheet\"></head> <body> <div class=\"neumorphism-3\"></div> <div class=\"inpt\"> <input type=\"number\" id=\"red\"> <input type=\"number\" id=\"green\"> <input type=\"number\" id=\"blue\"> </div> <h1 class=\"rainbow-text\">--RGB-TO-COLOR--</h1> <script src=\"script.js\"></script></body> </html>",
"e": 26461,
"s": 25806,
"text": null
},
{
"code": null,
"e": 26620,
"s": 26461,
"text": "CSS code: The CSS contains some additional lines for a cool hover effect. The following code is the content for βstyles.cssβ code used in the above HTML code."
},
{
"code": null,
"e": 26640,
"s": 26620,
"text": "Filename: style.css"
},
{
"code": null,
"e": 26644,
"s": 26640,
"text": "CSS"
},
{
"code": "* { padding: 0; margin: 0; box-sizing: border-box;} body { background: #f7f7f7; padding-top: 14%;} .neumorphism-3 { width: 300px; height: 300px; border-radius: 50%; box-shadow: -3px -3px 7px #e9e9e9a9, 3px 3px 7px #e9e9e9a9; position: absolute; top: 40px; left: 490px;} .neumorphism-3:hover { top: 30px; box-shadow: -3px -3px 7px #999999a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #999999a9, -3px -3px 12px #e9e9e9a9; animation: uplift 0.1s 1 linear;} .neumorphism-3:not( :hover) { animation: downlift 0.1s 1 linear; top: 40px;} @keyframes uplift { 0% { top: 40px; } 25% { top: 37.5px; } 50% { top: 35px; } 75% { top: 32.5px; } 100% { top: 30px; }} @keyframes downlift { 0% { box-shadow: -3px -3px 7px #999999a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #999999a9, -3px -3px 12px #e9e9e9a9; top: 30px; } 25% { box-shadow: -3px -3px 7px #b3b3b3a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #b3b3b3a9, -3px -3px 12px #e9e9e9a9; top: 32.5px; } 50% { top: 35px; box-shadow: -3px -3px 7px #d6d6d6a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #d6d6d6a9, -3px -3px 12px #e9e9e9a9; } 75% { top: 37.5px; box-shadow: -3px -3px 7px #f3f3f3a9, -3px -3px 12px #e9e9e9a9, 3px 3px 7px #f3f3f3a9, -3px -3px 12px #e9e9e9a9; } 100% { box-shadow: -3px -3px 7px #e9e9e9a9, 3px 3px 7px #e9e9e9a9; top: 40px; }} div.input { position: absolute; top: 450px; left: 550px;} div.input input { height: 30px; width: 60px; font-size: 30px; color: seashell; text-align: center; opacity: 0.7; border: none; border-radius: 4px;} #red { background-color: red;} #green { background-color: green;} #blue { background-color: blue;} /* Chrome, Safari, Edge */ input::-webkit-outer-spin-button,input::-webkit-inner-spin-button { -webkit-appearance: none; margin: 0;} /* Firefox */input[type=number] { -moz-appearance: textfield;} .rainbow-text { background-image: linear-gradient ( to left, violet, indigo, blue, green, yellow, orange, red); width: 300px; height: 50px; -webkit-background-clip: text; color: transparent; font-family: 
\"Itim\"; text-align: center; position: relative; top: 340px; left: 500px;}",
"e": 29221,
"s": 26644,
"text": null
},
{
"code": null,
"e": 29316,
"s": 29221,
"text": "JavaScript code: The following is the JavaScript code βscript.jsβ used in the above HTML code."
},
{
"code": null,
"e": 29336,
"s": 29316,
"text": "Filename: script.js"
},
{
"code": null,
"e": 29347,
"s": 29336,
"text": "Javascript"
},
{
"code": "let red = document.getElementById('red');let green = document.getElementById('green');let blue = document.getElementById('blue');let box = document.querySelector('div.neumorphism-3'); let r = 0, g = 0, b = 0; red.addEventListener(\"keyup\", function (event) { r = red.value; if (!r) r = 0; box.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;}); green.addEventListener(\"keyup\", function (event) { g = green.value; if (!g) g = 0; box.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;}); blue.addEventListener(\"keyup\", function (event) { b = blue.value; if (!b) b = 0; box.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;});",
"e": 30019,
"s": 29347,
"text": null
},
{
"code": null,
"e": 30123,
"s": 30019,
"text": "Output: After running the file, you can choose specific values for R, G, B and get your desired colors."
},
{
"code": null,
"e": 30127,
"s": 30123,
"text": "RGB"
},
{
"code": null,
"e": 30139,
"s": 30127,
"text": "anikakapoor"
},
{
"code": null,
"e": 30154,
"s": 30139,
"text": "sagar0719kumar"
},
{
"code": null,
"e": 30169,
"s": 30154,
"text": "CSS-Properties"
},
{
"code": null,
"e": 30183,
"s": 30169,
"text": "CSS-Questions"
},
{
"code": null,
"e": 30198,
"s": 30183,
"text": "HTML-Questions"
},
{
"code": null,
"e": 30217,
"s": 30198,
"text": "JavaScript-Methods"
},
{
"code": null,
"e": 30238,
"s": 30217,
"text": "JavaScript-Questions"
},
{
"code": null,
"e": 30242,
"s": 30238,
"text": "CSS"
},
{
"code": null,
"e": 30247,
"s": 30242,
"text": "HTML"
},
{
"code": null,
"e": 30258,
"s": 30247,
"text": "JavaScript"
},
{
"code": null,
"e": 30275,
"s": 30258,
"text": "Web Technologies"
},
{
"code": null,
"e": 30280,
"s": 30275,
"text": "HTML"
},
{
"code": null,
"e": 30378,
"s": 30280,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30415,
"s": 30378,
"text": "Design a web page using HTML and CSS"
},
{
"code": null,
"e": 30444,
"s": 30415,
"text": "Form validation using jQuery"
},
{
"code": null,
"e": 30483,
"s": 30444,
"text": "How to set space between the flexbox ?"
},
{
"code": null,
"e": 30525,
"s": 30483,
"text": "Search Bar using HTML, CSS and JavaScript"
},
{
"code": null,
"e": 30572,
"s": 30525,
"text": "How to Create Time-Table schedule using HTML ?"
},
{
"code": null,
"e": 30632,
"s": 30572,
"text": "How to set the default value for an HTML <select> element ?"
},
{
"code": null,
"e": 30693,
"s": 30632,
"text": "How to set input type date in dd-mm-yyyy format using HTML ?"
},
{
"code": null,
"e": 30746,
"s": 30693,
"text": "Hide or show elements in HTML using display property"
},
{
"code": null,
"e": 30796,
"s": 30746,
"text": "How to Insert Form Data into Database using PHP ?"
}
] |
JavaFX - Animations
|
In general, animating an object implies creating illusion of its motion by rapid display. In JavaFX, a node can be animated by changing its property over time. JavaFX provides a package named javafx.animation. This package contains classes that are used to animate the nodes. Animation is the base class of all these classes.
Using JavaFX, you can apply animations (transitions) such as Fade Transition, Fill Transition, Rotate Transition, Scale Transition, Stroke Transition, Translate Transition, Path Transition, Sequential Transition, Pause Transition, Parallel Transition, etc.
All these transitions are represented by individual classes in the package javafx.animation.
To apply a particular animation to a node, you have to follow the steps given below β
Create the required node using the respective class.
Create the required node using the respective class.
Instantiate the respective transition (animation) class that is to be applied
Instantiate the respective transition (animation) class that is to be applied
Set the properties of the transition and
Set the properties of the transition and
Finally play the transition using the play() method of the Animation class.
Finally play the transition using the play() method of the Animation class.
In this chapter, we are going to discuss examples of basic transitions (Rotation, Scaling, Translation).
Following is the program which demonstrates Rotate Transition in JavaFX. Save this code in a file with the name RotateTransitionExample.java.
import javafx.animation.RotateTransition;
import javafx.application.Application;
import static javafx.application.Application.launch;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.paint.Color;
import javafx.scene.shape.Polygon;
import javafx.stage.Stage;
import javafx.util.Duration;
public class RotateTransitionExample extends Application {
@Override
public void start(Stage stage) {
//Creating a hexagon
Polygon hexagon = new Polygon();
//Adding coordinates to the hexagon
hexagon.getPoints().addAll(new Double[]{
200.0, 50.0,
400.0, 50.0,
450.0, 150.0,
400.0, 250.0,
200.0, 250.0,
150.0, 150.0,
});
//Setting the fill color for the hexagon
hexagon.setFill(Color.BLUE);
//Creating a rotate transition
RotateTransition rotateTransition = new RotateTransition();
//Setting the duration for the transition
rotateTransition.setDuration(Duration.millis(1000));
//Setting the node for the transition
rotateTransition.setNode(hexagon);
//Setting the angle of the rotation
rotateTransition.setByAngle(360);
//Setting the cycle count for the transition
rotateTransition.setCycleCount(50);
//Setting auto reverse value to false
rotateTransition.setAutoReverse(false);
//Playing the animation
rotateTransition.play();
//Creating a Group object
Group root = new Group(hexagon);
//Creating a scene object
Scene scene = new Scene(root, 600, 300);
//Setting title to the Stage
stage.setTitle("Rotate transition example ");
//Adding scene to the stage
stage.setScene(scene);
//Displaying the contents of the stage
stage.show();
}
public static void main(String args[]){
launch(args);
}
}
Compile and execute the saved java file from the command prompt using the following commands.
javac RotateTransitionExample.java
java RotateTransitionExample
On executing, the above program generates a JavaFX window as shown below.
Following is the program which demonstrates Scale Transition in JavaFX. Save this code in a file with the name ScaleTransitionExample.java.
import javafx.animation.ScaleTransition;
import javafx.application.Application;
import static javafx.application.Application.launch;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.stage.Stage;
import javafx.util.Duration;
public class ScaleTransitionExample extends Application {
@Override
public void start(Stage stage) {
//Drawing a Circle
Circle circle = new Circle();
//Setting the position of the circle
circle.setCenterX(300.0f);
circle.setCenterY(135.0f);
//Setting the radius of the circle
circle.setRadius(50.0f);
//Setting the color of the circle
circle.setFill(Color.BROWN);
//Setting the stroke width of the circle
circle.setStrokeWidth(20);
//Creating scale Transition
ScaleTransition scaleTransition = new ScaleTransition();
//Setting the duration for the transition
scaleTransition.setDuration(Duration.millis(1000));
//Setting the node for the transition
scaleTransition.setNode(circle);
//Setting the dimensions for scaling
scaleTransition.setByY(1.5);
scaleTransition.setByX(1.5);
//Setting the cycle count for the translation
scaleTransition.setCycleCount(50);
//Setting auto reverse value to true
scaleTransition.setAutoReverse(false);
//Playing the animation
scaleTransition.play();
//Creating a Group object
Group root = new Group(circle);
//Creating a scene object
Scene scene = new Scene(root, 600, 300);
//Setting title to the Stage
stage.setTitle("Scale transition example");
//Adding scene to the stage
stage.setScene(scene);
//Displaying the contents of the stage
stage.show();
}
public static void main(String args[]){
launch(args);
}
}
Compile and execute the saved java file from the command prompt using the following commands.
javac ScaleTransitionExample.java
java ScaleTransitionExample
On executing, the above program generates a JavaFX window as shown below.
Following is the program which demonstrates Translate Transition in JavaFX. Save this code in a file with the name TranslateTransitionExample.java.
import javafx.animation.TranslateTransition;
import javafx.application.Application;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.stage.Stage;
import javafx.util.Duration;
public class TranslateTransitionExample extends Application {
@Override
public void start(Stage stage) {
//Drawing a Circle
Circle circle = new Circle();
//Setting the position of the circle
circle.setCenterX(150.0f);
circle.setCenterY(135.0f);
//Setting the radius of the circle
circle.setRadius(100.0f);
//Setting the color of the circle
circle.setFill(Color.BROWN);
//Setting the stroke width of the circle
circle.setStrokeWidth(20);
//Creating Translate Transition
TranslateTransition translateTransition = new TranslateTransition();
//Setting the duration of the transition
translateTransition.setDuration(Duration.millis(1000));
//Setting the node for the transition
translateTransition.setNode(circle);
//Setting the value of the transition along the x axis.
translateTransition.setByX(300);
//Setting the cycle count for the transition
translateTransition.setCycleCount(50);
//Setting auto reverse value to false
translateTransition.setAutoReverse(false);
//Playing the animation
translateTransition.play();
//Creating a Group object
Group root = new Group(circle);
//Creating a scene object
Scene scene = new Scene(root, 600, 300);
//Setting title to the Stage
stage.setTitle("Translate transition example");
//Adding scene to the stage
stage.setScene(scene);
//Displaying the contents of the stage
stage.show();
}
public static void main(String args[]){
launch(args);
}
}
Compile and execute the saved java file from the command prompt using the following commands.
javac TranslateTransitionExample.java
java TranslateTransitionExample
On executing, the above program generates a JavaFX window as shown below.
In addition to these, JavaFX provides classes to apply more transitions on nodes. The following are the other kinds of transitions supported by JavaFX.
Transitions that affect the attributes of the nodes: Fade, Fill, Stroke
Transitions that affect the attributes of the nodes: Fade, Fill, Stroke
Transitions that involve more than one basic transition: Sequential, Parallel, Pause
Transitions that involve more than one basic transition: Sequential, Parallel, Pause
Transitions that translate the object along a specified path: Path Transition
Transitions that translate the object along a specified path: Path Transition
33 Lectures
7.5 hours
Syed Raza
64 Lectures
12.5 hours
Emenwa Global, Ejike IfeanyiChukwu
20 Lectures
4 hours
Emenwa Global, Ejike IfeanyiChukwu
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2226,
"s": 1900,
"text": "In general, animating an object implies creating illusion of its motion by rapid display. In JavaFX, a node can be animated by changing its property over time. JavaFX provides a package named javafx.animation. This package contains classes that are used to animate the nodes. Animation is the base class of all these classes."
},
{
"code": null,
"e": 2483,
"s": 2226,
"text": "Using JavaFX, you can apply animations (transitions) such as Fade Transition, Fill Transition, Rotate Transition, Scale Transition, Stroke Transition, Translate Transition, Path Transition, Sequential Transition, Pause Transition, Parallel Transition, etc."
},
{
"code": null,
"e": 2576,
"s": 2483,
"text": "All these transitions are represented by individual classes in the package javafx.animation."
},
{
"code": null,
"e": 2662,
"s": 2576,
"text": "To apply a particular animation to a node, you have to follow the steps given below β"
},
{
"code": null,
"e": 2708,
"s": 2662,
"text": "Create a require node using respective class."
},
{
"code": null,
"e": 2754,
"s": 2708,
"text": "Create a require node using respective class."
},
{
"code": null,
"e": 2832,
"s": 2754,
"text": "Instantiate the respective transition (animation) class that is to be applied"
},
{
"code": null,
"e": 2910,
"s": 2832,
"text": "Instantiate the respective transition (animation) class that is to be applied"
},
{
"code": null,
"e": 2951,
"s": 2910,
"text": "Set the properties of the transition and"
},
{
"code": null,
"e": 2992,
"s": 2951,
"text": "Set the properties of the transition and"
},
{
"code": null,
"e": 3068,
"s": 2992,
"text": "Finally play the transition using the play() method of the Animation class."
},
{
"code": null,
"e": 3144,
"s": 3068,
"text": "Finally play the transition using the play() method of the Animation class."
},
{
"code": null,
"e": 3247,
"s": 3144,
"text": "In this chapter we are going to discuss examples of basic transitions(Rotation, Scaling, Translation)."
},
{
"code": null,
"e": 3389,
"s": 3247,
"text": "Following is the program which demonstrates Rotate Transition in JavaFX. Save this code in a file with the name RotateTransitionExample.java."
},
{
"code": null,
"e": 5485,
"s": 3389,
"text": "import javafx.animation.RotateTransition; \nimport javafx.application.Application; \nimport static javafx.application.Application.launch; \nimport javafx.scene.Group; \nimport javafx.scene.Scene; \nimport javafx.scene.paint.Color; \nimport javafx.scene.shape.Polygon; \nimport javafx.stage.Stage; \nimport javafx.util.Duration; \n \npublic class RotateTransitionExample extends Application { \n @Override \n public void start(Stage stage) { \n //Creating a hexagon \n Polygon hexagon = new Polygon(); \n \n //Adding coordinates to the hexagon \n hexagon.getPoints().addAll(new Double[]{ \n 200.0, 50.0, \n 400.0, 50.0, \n 450.0, 150.0, \n 400.0, 250.0, \n 200.0, 250.0, \n 150.0, 150.0, \n }); \n //Setting the fill color for the hexagon \n hexagon.setFill(Color.BLUE); \n \n //Creating a rotate transition \n RotateTransition rotateTransition = new RotateTransition(); \n \n //Setting the duration for the transition \n rotateTransition.setDuration(Duration.millis(1000)); \n \n //Setting the node for the transition \n rotateTransition.setNode(hexagon); \n \n //Setting the angle of the rotation \n rotateTransition.setByAngle(360); \n \n //Setting the cycle count for the transition \n rotateTransition.setCycleCount(50); \n \n //Setting auto reverse value to false \n rotateTransition.setAutoReverse(false); \n \n //Playing the animation \n rotateTransition.play(); \n \n //Creating a Group object \n Group root = new Group(hexagon); \n \n //Creating a scene object \n Scene scene = new Scene(root, 600, 300); \n \n //Setting title to the Stage \n stage.setTitle(\"Rotate transition example \"); \n \n //Adding scene to the stage \n stage.setScene(scene); \n \n //Displaying the contents of the stage \n stage.show(); \n } \n public static void main(String args[]){ \n launch(args); \n } \n} "
},
{
"code": null,
"e": 5579,
"s": 5485,
"text": "Compile and execute the saved java file from the command prompt using the following commands."
},
{
"code": null,
"e": 5645,
"s": 5579,
"text": "javac RotateTransitionExample.java \njava RotateTransitionExample\n"
},
{
"code": null,
"e": 5719,
"s": 5645,
"text": "On executing, the above program generates a JavaFX window as shown below."
},
{
"code": null,
"e": 5859,
"s": 5719,
"text": "Following is the program which demonstrates Scale Transition in JavaFX. Save this code in a file with the name ScaleTransitionExample.java."
},
{
"code": null,
"e": 7950,
"s": 5859,
"text": "import javafx.animation.ScaleTransition; \nimport javafx.application.Application; \nimport static javafx.application.Application.launch; \nimport javafx.scene.Group; \nimport javafx.scene.Scene; \nimport javafx.scene.paint.Color; \nimport javafx.scene.shape.Circle; \nimport javafx.stage.Stage; \nimport javafx.util.Duration; \n \npublic class ScaleTransitionExample extends Application { \n @Override \n public void start(Stage stage) { \n //Drawing a Circle \n Circle circle = new Circle(); \n \n //Setting the position of the circle \n circle.setCenterX(300.0f); \n circle.setCenterY(135.0f); \n \n //Setting the radius of the circle \n circle.setRadius(50.0f); \n \n //Setting the color of the circle \n circle.setFill(Color.BROWN); \n \n //Setting the stroke width of the circle \n circle.setStrokeWidth(20); \n \n //Creating scale Transition \n ScaleTransition scaleTransition = new ScaleTransition(); \n \n //Setting the duration for the transition \n scaleTransition.setDuration(Duration.millis(1000)); \n \n //Setting the node for the transition \n scaleTransition.setNode(circle); \n \n //Setting the dimensions for scaling \n scaleTransition.setByY(1.5); \n scaleTransition.setByX(1.5); \n \n //Setting the cycle count for the translation \n scaleTransition.setCycleCount(50); \n \n //Setting auto reverse value to true \n scaleTransition.setAutoReverse(false); \n \n //Playing the animation \n scaleTransition.play(); \n \n //Creating a Group object \n Group root = new Group(circle); \n \n //Creating a scene object \n Scene scene = new Scene(root, 600, 300); \n \n //Setting title to the Stage \n stage.setTitle(\"Scale transition example\"); \n \n //Adding scene to the stage \n stage.setScene(scene); \n \n //Displaying the contents of the stage \n stage.show(); \n } \n public static void main(String args[]){ \n launch(args); \n } \n}"
},
{
"code": null,
"e": 8044,
"s": 7950,
"text": "Compile and execute the saved java file from the command prompt using the following commands."
},
{
"code": null,
"e": 8108,
"s": 8044,
"text": "javac ScaleTransitionExample.java \njava ScaleTransitionExample\n"
},
{
"code": null,
"e": 8182,
"s": 8108,
"text": "On executing, the above program generates a JavaFX window as shown below."
},
{
"code": null,
"e": 8330,
"s": 8182,
"text": "Following is the program which demonstrates Translate Transition in JavaFX. Save this code in a file with the name TranslateTransitionExample.java."
},
{
"code": null,
"e": 10398,
"s": 8330,
"text": "import javafx.animation.TranslateTransition; \nimport javafx.application.Application; \nimport javafx.scene.Group; \nimport javafx.scene.Scene; \nimport javafx.scene.paint.Color; \nimport javafx.scene.shape.Circle; \nimport javafx.stage.Stage; \nimport javafx.util.Duration; \n \npublic class TranslateTransitionExample extends Application { \n @Override \n public void start(Stage stage) { \n //Drawing a Circle \n Circle circle = new Circle(); \n \n //Setting the position of the circle \n circle.setCenterX(150.0f); \n circle.setCenterY(135.0f); \n \n //Setting the radius of the circle \n circle.setRadius(100.0f); \n \n //Setting the color of the circle \n circle.setFill(Color.BROWN); \n \n //Setting the stroke width of the circle \n circle.setStrokeWidth(20); \n \n //Creating Translate Transition \n TranslateTransition translateTransition = new TranslateTransition(); \n \n //Setting the duration of the transition \n translateTransition.setDuration(Duration.millis(1000)); \n \n //Setting the node for the transition \n translateTransition.setNode(circle); \n \n //Setting the value of the transition along the x axis. \n translateTransition.setByX(300); \n \n //Setting the cycle count for the transition \n translateTransition.setCycleCount(50); \n \n //Setting auto reverse value to false \n translateTransition.setAutoReverse(false); \n \n //Playing the animation \n translateTransition.play(); \n \n //Creating a Group object \n Group root = new Group(circle); \n \n //Creating a scene object \n Scene scene = new Scene(root, 600, 300); \n \n //Setting title to the Stage \n stage.setTitle(\"Translate transition example\"); \n \n //Adding scene to the stage \n stage.setScene(scene); \n \n //Displaying the contents of the stage \n stage.show(); \n } \n public static void main(String args[]){ \n launch(args); \n } \n}"
},
{
"code": null,
"e": 10492,
"s": 10398,
"text": "Compile and execute the saved java file from the command prompt using the following commands."
},
{
"code": null,
"e": 10565,
"s": 10492,
"text": "javac TranslateTransitionExample.java \njava TranslateTransitionExample \n"
},
{
"code": null,
"e": 10639,
"s": 10565,
"text": "On executing, the above program generates a JavaFX window as shown below."
},
{
"code": null,
"e": 10791,
"s": 10639,
"text": "In addition to these, JavaFX provides classes to apply more transitions on nodes. The following are the other kinds of transitions supported by JavaFX."
},
{
"code": null,
"e": 10863,
"s": 10791,
"text": "Transitions that effects the attributes of the nodes Fade, Fill, Stroke"
},
{
"code": null,
"e": 10935,
"s": 10863,
"text": "Transitions that effects the attributes of the nodes Fade, Fill, Stroke"
},
{
"code": null,
"e": 11019,
"s": 10935,
"text": "Transition that involve more than one basic transitions Sequential, Parallel, Pause"
},
{
"code": null,
"e": 11103,
"s": 11019,
"text": "Transition that involve more than one basic transitions Sequential, Parallel, Pause"
},
{
"code": null,
"e": 11181,
"s": 11103,
"text": "Transition that translate the object along the specified path Path Transition"
},
{
"code": null,
"e": 11259,
"s": 11181,
"text": "Transition that translate the object along the specified path Path Transition"
},
{
"code": null,
"e": 11294,
"s": 11259,
"text": "\n 33 Lectures \n 7.5 hours \n"
},
{
"code": null,
"e": 11305,
"s": 11294,
"text": " Syed Raza"
},
{
"code": null,
"e": 11341,
"s": 11305,
"text": "\n 64 Lectures \n 12.5 hours \n"
},
{
"code": null,
"e": 11377,
"s": 11341,
"text": " Emenwa Global, Ejike IfeanyiChukwu"
},
{
"code": null,
"e": 11410,
"s": 11377,
"text": "\n 20 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 11446,
"s": 11410,
"text": " Emenwa Global, Ejike IfeanyiChukwu"
},
{
"code": null,
"e": 11453,
"s": 11446,
"text": " Print"
},
{
"code": null,
"e": 11464,
"s": 11453,
"text": " Add Notes"
}
] |
Determining Countability in TOC - GeeksforGeeks
|
27 Nov, 2019
A countable set is a set having the same cardinality as some subset of N, the set of natural numbers. A countable set is one which is listable.
Cardinality of a countable set can be a finite number. For example, B: {1, 5, 4}, |B| = 3, in this case its termed countably finite or the cardinality of countable set can be infinite. For example, A: {2, 4, 6, 8 ...}, in this case its termed countably infinite.
Common Traces for Countable Set:
Cardinality can be expressed in the form |S| = m, where m ∈ N ∪ {ℵ₀}; m may or may not be ℵ₀ (aleph-null).
It has finite elements* only in case of countably finite sets.
It is listable in terms of roster form* — an exhaustive list exists which can include every element at least once; in the case of a countably infinite list, the first few elements are followed by a three-dot ellipsis (...).
Set of Rational numbers is Countably Infinite:
Follow along the red line to build a roster set containing all rational numbers. Hence an exhaustive set containing every element at least once can be built; therefore the set of rational numbers is countably infinite.
Uncountable Sets: A set such that its elements cannot be listed, or to put it intuitively, there exists no sequence which can list every element of the set at least once.
Example:
R : {set of real numbers is uncountable}
B : {set of all binary sequences of infinite length}
Common Traces for Uncountable Set:
Cardinality expressed in the form 2^ℵ₀ (the cardinality of the power set of N);
It is power set of set with infinite elements
It is equal set to R set of real numbers
It is equal set to Q set of irrational numbers
It is non-listable set
Union Operations quick Reference:
Example-1:Let N be the set of natural numbers. Consider the following sets,
P: Set of Rational numbers (positive and negative)
Q: Set of functions from {0, 1} to N
R: Set of functions from N to {0, 1}
S: Set of finite subsets of N
Which of the above sets are countable ?(A) Q and S only(B) P and S only(C) P and R only(D) P, Q and S only
Explanation:Please see GATE CS 2018 | Question 58
Example-2:Consider the following sets:
S1: Set of all recursively enumerable languages over the alphabet {0, 1}.
S2: Set of all syntactically valid C programs.
S3: Set of all languages over the alphabet {0, 1}.
S4: Set of all non-regular languages over the alphabet {0, 1}.
Which of the above sets are uncountable?(A) S1 and S2(B) S3 and S4(C) S1 and S4(D) S2 and S3
Explanation:Please see GATE CS 2019 | Question 43
GATE CS
Theory of Computation & Automata
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Difference between Clustered and Non-clustered index
Preemptive and Non-Preemptive Scheduling
Phases of a Compiler
Introduction of Process Synchronization
Differences between IPv4 and IPv6
Regular Expressions, Regular Grammar and Regular Languages
Difference between DFA and NFA
Introduction of Finite Automata
Difference between Mealy machine and Moore machine
Pumping Lemma in Theory of Computation
|
[
{
"code": null,
"e": 24352,
"s": 24324,
"text": "\n27 Nov, 2019"
},
{
"code": null,
"e": 24502,
"s": 24352,
"text": "Countable Set is a set having cardinality same as that of some subset of N the set of natural numbers . A countable set is the one which is listable."
},
{
"code": null,
"e": 24765,
"s": 24502,
"text": "Cardinality of a countable set can be a finite number. For example, B: {1, 5, 4}, |B| = 3, in this case its termed countably finite or the cardinality of countable set can be infinite. For example, A: {2, 4, 6, 8 ...}, in this case its termed countably infinite."
},
{
"code": null,
"e": 24798,
"s": 24765,
"text": "Common Traces for Countable Set:"
},
{
"code": null,
"e": 24861,
"s": 24798,
"text": "Cardinality expressed in form where , ; m may or may not be β"
},
{
"code": null,
"e": 24924,
"s": 24861,
"text": "It has finite elements* only in case of countably finite sets."
},
{
"code": null,
"e": 25125,
"s": 24924,
"text": "It listable in terms of roaster form* an exhaustive list exists which can include every element atleast once, in case of countably infinite list first few elements followed by three dot ellipsis(...)."
},
{
"code": null,
"e": 25172,
"s": 25125,
"text": "Set of Rational numbers is Countably Infinite:"
},
{
"code": null,
"e": 25384,
"s": 25172,
"text": "Follow along the red line to build roaster set containing all rational numbers. Hence an exhaustive set containing every element atleast once can be build therefore set of rational numbers is countably infinite."
},
{
"code": null,
"e": 25550,
"s": 25384,
"text": "Uncountable Sets:A set such that its elements cannot be listed, or to put intuitively, there exists no sequence which can list every element of the set atleast once."
},
{
"code": null,
"e": 25559,
"s": 25550,
"text": "Example:"
},
{
"code": null,
"e": 25654,
"s": 25559,
"text": "R : {set of real numbers is uncountable}\nB : {set of all binary sequences of infinite length} "
},
{
"code": null,
"e": 25689,
"s": 25654,
"text": "Common Traces for Uncountable Set:"
},
{
"code": null,
"e": 25721,
"s": 25689,
"text": "Cardinality expressed in form ;"
},
{
"code": null,
"e": 25767,
"s": 25721,
"text": "It is power set of set with infinite elements"
},
{
"code": null,
"e": 25808,
"s": 25767,
"text": "It is equal set to R set of real numbers"
},
{
"code": null,
"e": 25855,
"s": 25808,
"text": "It is equal set to Q set of irrational numbers"
},
{
"code": null,
"e": 25878,
"s": 25855,
"text": "It is non-listable set"
},
{
"code": null,
"e": 25912,
"s": 25878,
"text": "Union Operations quick Reference:"
},
{
"code": null,
"e": 25988,
"s": 25912,
"text": "Example-1:Let N be the set of natural numbers. Consider the following sets,"
},
{
"code": null,
"e": 26144,
"s": 25988,
"text": "P: Set of Rational numbers (positive and negative)\nQ: Set of functions from {0, 1} to N\nR: Set of functions from N to {0, 1}\nS: Set of finite subsets of N "
},
{
"code": null,
"e": 26251,
"s": 26144,
"text": "Which of the above sets are countable ?(A) Q and S only(B) P and S only(C) P and R only(D) P, Q and S only"
},
{
"code": null,
"e": 26301,
"s": 26251,
"text": "Explanation:Please see GATE CS 2018 | Question 58"
},
{
"code": null,
"e": 26340,
"s": 26301,
"text": "Example-2:Consider the following sets:"
},
{
"code": null,
"e": 26576,
"s": 26340,
"text": "S1: Set of all recursively enumerable languages over the alphabet {0, 1}.\nS2: Set of all syntactically valid C programs.\nS3: Set of all languages over the alphabet {0, 1}.\nS4: Set of all non-regular languages over the alphabet {0, 1}. "
},
{
"code": null,
"e": 26669,
"s": 26576,
"text": "Which of the above sets are uncountable?(A) S1 and S2(B) S3 and S4(C) S1 and S4(D) S2 and S3"
},
{
"code": null,
"e": 26719,
"s": 26669,
"text": "Explanation:Please see GATE CS 2019 | Question 43"
},
{
"code": null,
"e": 26727,
"s": 26719,
"text": "GATE CS"
},
{
"code": null,
"e": 26760,
"s": 26727,
"text": "Theory of Computation & Automata"
},
{
"code": null,
"e": 26858,
"s": 26760,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26911,
"s": 26858,
"text": "Difference between Clustered and Non-clustered index"
},
{
"code": null,
"e": 26952,
"s": 26911,
"text": "Preemptive and Non-Preemptive Scheduling"
},
{
"code": null,
"e": 26973,
"s": 26952,
"text": "Phases of a Compiler"
},
{
"code": null,
"e": 27013,
"s": 26973,
"text": "Introduction of Process Synchronization"
},
{
"code": null,
"e": 27047,
"s": 27013,
"text": "Differences between IPv4 and IPv6"
},
{
"code": null,
"e": 27106,
"s": 27047,
"text": "Regular Expressions, Regular Grammar and Regular Languages"
},
{
"code": null,
"e": 27137,
"s": 27106,
"text": "Difference between DFA and NFA"
},
{
"code": null,
"e": 27169,
"s": 27137,
"text": "Introduction of Finite Automata"
},
{
"code": null,
"e": 27220,
"s": 27169,
"text": "Difference between Mealy machine and Moore machine"
}
] |
How to get formatted JSON in .NET using C#?
|
Use the namespace Newtonsoft.Json. The Newtonsoft.Json.Formatting enum provides formatting options to format the JSON:
None — No special formatting is applied. This is the default.
Indented — Causes child objects to be indented according to the Newtonsoft.Json.JsonTextWriter.Indentation and Newtonsoft.Json.JsonTextWriter.IndentChar settings.
static void Main(string[] args){
Product product = new Product{
Name = "Apple",
Expiry = new DateTime(2008, 12, 28),
Price = 3.9900M,
Sizes = new[] { "Small", "Medium", "Large" }
};
string json = JsonConvert.SerializeObject(product, Formatting.Indented);
Console.WriteLine(json);
Product deserializedProduct = JsonConvert.DeserializeObject<Product>(json);
Console.ReadLine();
}
class Product{
public String[] Sizes { get; set; }
public decimal Price { get; set; }
public DateTime Expiry { get; set; }
public string Name { get; set; }
}
{
"Sizes": [
"Small",
"Medium",
"Large"
],
"Price": 3.9900,
"Expiry": "2008-12-28T00:00:00",
"Name": "Apple"
}
static class Program{
static void Main(string[] args){
Product product = new Product{
Name = "Apple",
Expiry = new DateTime(2008, 12, 28),
Price = 3.9900M,
Sizes = new[] { "Small", "Medium", "Large" }
};
string json = JsonConvert.SerializeObject(product, Formatting.None);
Console.WriteLine(json);
Product deserializedProduct = JsonConvert.DeserializeObject<Product>(json);
Console.ReadLine();
}
}
class Product{
public String[] Sizes { get; set; }
public decimal Price { get; set; }
public DateTime Expiry { get; set; }
public string Name { get; set; }
}
{"Sizes":["Small","Medium","Large"],"Price":3.9900,"Expiry":"2008-12-28T00:00:00","Name":"Apple"}
|
[
{
"code": null,
"e": 1177,
"s": 1062,
"text": "Use Namespace Newtonsoft.Json.Formatting Newtonsoft.Json.Formatting provides formatting options to Format the Json"
},
{
"code": null,
"e": 1239,
"s": 1177,
"text": "None β No special formatting is applied. This is the default."
},
{
"code": null,
"e": 1402,
"s": 1239,
"text": "Indented β Causes child objects to be indented according to the Newtonsoft.Json.JsonTextWriter.Indentation and Newtonsoft.Json.JsonTextWriter.IndentChar settings."
},
{
"code": null,
"e": 1992,
"s": 1402,
"text": "static void Main(string[] args){\n Product product = new Product{\n Name = \"Apple\",\n Expiry = new DateTime(2008, 12, 28),\n Price = 3.9900M,\n Sizes = new[] { \"Small\", \"Medium\", \"Large\" }\n };\n string json = JsonConvert.SerializeObject(product, Formatting.Indented);\n Console.WriteLine(json);\n Product deserializedProduct = JsonConvert.DeserializeObject<Product>(json);\n Console.ReadLine();\n}\nclass Product{\n public String[] Sizes { get; set; }\n public decimal Price { get; set; }\n public DateTime Expiry { get; set; }\n public string Name { get; set; }\n}"
},
{
"code": null,
"e": 2136,
"s": 1992,
"text": "{\n \"Sizes\": [\n \"Small\",\n \"Medium\",\n \"Large\"\n ],\n \"Price\": 3.9900,\n \"Expiry\": \"2008-12-28T00:00:00\",\n \"Name\": \"Apple\"\n}"
},
{
"code": null,
"e": 2782,
"s": 2136,
"text": "static class Program{\n static void Main(string[] args){\n Product product = new Product{\n Name = \"Apple\",\n Expiry = new DateTime(2008, 12, 28),\n Price = 3.9900M,\n Sizes = new[] { \"Small\", \"Medium\", \"Large\" }\n };\n string json = JsonConvert.SerializeObject(product, Formatting.None);\n Console.WriteLine(json);\n Product deserializedProduct = JsonConvert.DeserializeObject<Product>(json);\n Console.ReadLine();\n }\n}\nclass Product{\n public String[] Sizes { get; set; }\n public decimal Price { get; set; }\n public DateTime Expiry { get; set; }\n public string Name { get; set; }\n}"
},
{
"code": null,
"e": 2880,
"s": 2782,
"text": "{\"Sizes\":[\"Small\",\"Medium\",\"Large\"],\"Price\":3.9900,\"Expiry\":\"2008-12-28T00:00:00\",\"Name\":\"Apple\"}"
}
] |
Count all possible pairs in given Array with product K - GeeksforGeeks
|
28 Feb, 2022
Given an integer array arr[] of size N and a positive integer K, the task is to count all the pairs in the array with a product equal to K.
Examples:
Input: arr[] = {1, 2, 16, 4, 4, 4, 8 }, K=16Output: 5Explanation: Possible pairs are (1, 16), (2, 8), (4, 4), (4, 4), (4, 4)
Input: arr[] = {1, 10, 20, 10, 4, 5, 5, 2 }, K=20Output: 5Explanation: Possible pairs are (1, 20), (2, 10), (2, 10), (4, 5), (4, 5)
Approach: The idea is to use hashing to store the elements and check if K/arr[i] exists in the array or not using the map and increase the count accordingly.
Follow the steps below to solve the problem:
Initialize the variable count as 0 to store the answer.
Initialize the unordered_map<int, int> mp[].
Iterate over the range [0, N) using the variable i and store the frequencies of all elements of the array arr[] in the map mp[].
Iterate over the range [0, N) using the variable i and perform the following tasks:Initialize the variable index as K/arr[i].If K is not a power of 2 and index is present in map mp[] then increase the value of count by mp[arr[i]]*mp[index] and erase both of them from the map mp[].If K is a power of 2 and index is present in map mp[] then increase the value of count by mp[index]*(mp[index]-1)/2 and erase it from the map mp[].
Initialize the variable index as K/arr[i].
If K is not a power of 2 and index is present in map mp[] then increase the value of count by mp[arr[i]]*mp[index] and erase both of them from the map mp[].
If K is a power of 2 and index is present in map mp[] then increase the value of count by mp[index]*(mp[index]-1)/2 and erase it from the map mp[].
After performing the above steps, print the value of count as the answer.
Below is the implementation of the above approach.
C++14
Java
Python3
C#
Javascript
// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to count// the total number of pairsint countPairsWithProductK( int arr[], int n, int k) { int count = 0; // Initialize hashmap. unordered_map<int, int> mp; // Insert array elements to hashmap for (int i = 0; i < n; i++) { mp[arr[i]]++; } for (int i = 0; i < n; i++) { double index = 1.0 * k / arr[i]; // If k is not power of two if (index >= 0 && ((index - (int)(index)) == 0) && mp.find(k / arr[i]) != mp.end() && (index != arr[i])) { count += mp[arr[i]] * mp[index]; // After counting erase the element mp.erase(arr[i]); mp.erase(index); } // If k is power of 2 if (index >= 0 && ((index - (int)(index)) == 0) && mp.find(k / arr[i]) != mp.end() && (index == arr[i])) { // Pair count count += (mp[arr[i]] * (mp[arr[i]] - 1)) / 2; // After counting erase the element; mp.erase(arr[i]); } } return count;} // Driver Codeint main() { int arr[] = { 1, 2, 16, 4, 4, 4, 8 }; int N = sizeof(arr) / sizeof(arr[0]); int K = 16; cout << countPairsWithProductK(arr, N, K); return 0;}
// Java program for the above approachimport java.util.*; class GFG{ // Function to count // the total number of pairs static int countPairsWithProductK( int arr[], int n, int k) { int count = 0; // Initialize hashmap. HashMap<Integer,Integer> mp = new HashMap<Integer,Integer>(); // Insert array elements to hashmap for (int i = 0; i < n; i++) { if(mp.containsKey(arr[i])){ mp.put(arr[i], mp.get(arr[i])+1); }else{ mp.put(arr[i], 1); } } for (int i = 0; i < n; i++) { int index = (int) (1.0 * k / arr[i]); // If k is not power of two if (index >= 0 && ((index - (int)(index)) == 0) && mp.containsKey(k / arr[i]) && (index != arr[i])) { count += mp.get(arr[i]) * mp.get(index); // After counting erase the element mp.remove(arr[i]); mp.remove(index); } // If k is power of 2 if (index >= 0 && ((index - (int)(index)) == 0) && mp.containsKey(k / arr[i]) && (index == arr[i])) { // Pair count count += (mp.get(arr[i]) * (mp.get(arr[i]) - 1)) / 2; // After counting erase the element; mp.remove(arr[i]); } } return count; } // Driver Code public static void main(String[] args) { int arr[] = { 1, 2, 16, 4, 4, 4, 8 }; int N = arr.length; int K = 16; System.out.print(countPairsWithProductK(arr, N, K)); }} // This code is contributed by 29AjayKumar
# Python 3 program for the above approachfrom collections import defaultdict # Function to count# the total number of pairsdef countPairsWithProductK(arr, n, k): count = 0 # Initialize hashmap. mp = defaultdict(int) # Insert array elements to hashmap for i in range(n): mp[arr[i]] += 1 for i in range(n): index = 1.0 * k / arr[i] # If k is not power of two if (index >= 0 and ((index - (int)(index)) == 0) and (k / arr[i]) in mp and (index != arr[i])): count += mp[arr[i]] * mp[index] # After counting erase the element del mp[arr[i]] del mp[index] # If k is power of 2 if (index >= 0 and ((index - (int)(index)) == 0) and (k / arr[i]) in mp and (index == arr[i])): # Pair count count += ((mp[arr[i]] * (mp[arr[i]] - 1)) / 2) # After counting erase the element; del mp[arr[i]] return count # Driver Codeif __name__ == "__main__": arr = [1, 2, 16, 4, 4, 4, 8] N = len(arr) K = 16 print(int(countPairsWithProductK(arr, N, K))) # This code is contributed by ukasp.
// C# program for the above approachusing System;using System.Collections.Generic; public class GFG{ // Function to count // the total number of pairs static int countPairsWithProductK( int []arr, int n, int k) { int count = 0; // Initialize hashmap. Dictionary<int,int> mp = new Dictionary<int,int>(); // Insert array elements to hashmap for (int i = 0; i < n; i++) { if(mp.ContainsKey(arr[i])){ mp[arr[i]] = mp[arr[i]]+1; }else{ mp.Add(arr[i], 1); } } for (int i = 0; i < n; i++) { int index = (int) (1.0 * k / arr[i]); // If k is not power of two if (index >= 0 && ((index - (int)(index)) == 0) && mp.ContainsKey(k / arr[i]) && (index != arr[i])) { count += mp[arr[i]] * mp[index]; // After counting erase the element mp.Remove(arr[i]); mp.Remove(index); } // If k is power of 2 if (index >= 0 && ((index - (int)(index)) == 0) && mp.ContainsKey(k / arr[i]) && (index == arr[i])) { // Pair count count += (mp[arr[i]] * (mp[arr[i]] - 1)) / 2; // After counting erase the element; mp.Remove(arr[i]); } } return count; } // Driver Code public static void Main(String[] args) { int []arr = { 1, 2, 16, 4, 4, 4, 8 }; int N = arr.Length; int K = 16; Console.Write(countPairsWithProductK(arr, N, K)); }} // This code is contributed by Rajput-Ji
<script> // JavaScript code for the above approach // Function to count // the total number of pairs function countPairsWithProductK( arr, n, k) { let count = 0; // Initialize hashmap. let mp = new Map(); // Insert array elements to hashmap for (let i = 0; i < n; i++) { if (mp.has(arr[i])) { mp.set(arr[i], mp.get(arr[i]) + 1); } else { mp.set(arr[i], 1); } } for (let i = 0; i < n; i++) { let index = 1.0 * k / arr[i]; // If k is not power of two if (index >= 0 && ((index - Math.floor(index)) == 0) && mp.has(k / arr[i]) && (index != arr[i])) { count += mp.get(arr[i]) * mp.get(index); // After counting erase the element mp.delete(arr[i]); mp.delete(index); } // If k is power of 2 if (index >= 0 && ((index - Math.floor(index)) == 0) && mp.has(k / arr[i]) && (index == arr[i])) { // Pair count count += (mp.get(arr[i]) * (mp.get(arr[i]) - 1)) / 2; // After counting erase the element; mp.delete(arr[i]); } } return count; } // Driver Code let arr = [1, 2, 16, 4, 4, 4, 8]; let N = arr.length; let K = 16; document.write(countPairsWithProductK(arr, N, K)); // This code is contributed by Potta Lokesh </script>
5
Time Complexity: O(N)Auxiliary Space: O(N)
lokeshpotta20
ukasp
29AjayKumar
Rajput-Ji
frequency-counting
Arrays
Combinatorial
Hash
Mathematical
Arrays
Hash
Mathematical
Combinatorial
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Count pairs with given sum
Chocolate Distribution Problem
Window Sliding Technique
Reversal algorithm for array rotation
Next Greater Element
Write a program to print all permutations of a given string
Permutation and Combination in Python
itertools.combinations() module in Python to print all possible combinations
Combinational Sum
Factorial of a large number
|
[
{
"code": null,
"e": 26067,
"s": 26039,
"text": "\n28 Feb, 2022"
},
{
"code": null,
"e": 26207,
"s": 26067,
"text": "Given an integer array arr[] of size N and a positive integer K, the task is to count all the pairs in the array with a product equal to K."
},
{
"code": null,
"e": 26217,
"s": 26207,
"text": "Examples:"
},
{
"code": null,
"e": 26342,
"s": 26217,
"text": "Input: arr[] = {1, 2, 16, 4, 4, 4, 8 }, K=16Output: 5Explanation: Possible pairs are (1, 16), (2, 8), (4, 4), (4, 4), (4, 4)"
},
{
"code": null,
"e": 26474,
"s": 26342,
"text": "Input: arr[] = {1, 10, 20, 10, 4, 5, 5, 2 }, K=20Output: 5Explanation: Possible pairs are (1, 20), (2, 10), (2, 10), (4, 5), (4, 5)"
},
{
"code": null,
"e": 26633,
"s": 26474,
"text": "Approach: The idea is to use hashing to store the elements and check if K/arr[i] exists in the array or not using the map and increase the count accordingly. "
},
{
"code": null,
"e": 26678,
"s": 26633,
"text": "Follow the steps below to solve the problem:"
},
{
"code": null,
"e": 26734,
"s": 26678,
"text": "Initialize the variable count as 0 to store the answer."
},
{
"code": null,
"e": 26779,
"s": 26734,
"text": "Initialize the unordered_map<int, int> mp[]."
},
{
"code": null,
"e": 26908,
"s": 26779,
"text": "Iterate over the range [0, N) using the variable i and store the frequencies of all elements of the array arr[] in the map mp[]."
},
{
"code": null,
"e": 27337,
"s": 26908,
"text": "Iterate over the range [0, N) using the variable i and perform the following tasks:Initialize the variable index as K/arr[i].If K is not a power of 2 and index is present in map mp[] then increase the value of count by mp[arr[i]]*mp[index] and erase both of them from the map mp[].If K is a power of 2 and index is present in map mp[] then increase the value of count by mp[index]*(mp[index]-1)/2 and erase it from the map mp[]."
},
{
"code": null,
"e": 27380,
"s": 27337,
"text": "Initialize the variable index as K/arr[i]."
},
{
"code": null,
"e": 27537,
"s": 27380,
"text": "If K is not a power of 2 and index is present in map mp[] then increase the value of count by mp[arr[i]]*mp[index] and erase both of them from the map mp[]."
},
{
"code": null,
"e": 27685,
"s": 27537,
"text": "If K is a power of 2 and index is present in map mp[] then increase the value of count by mp[index]*(mp[index]-1)/2 and erase it from the map mp[]."
},
{
"code": null,
"e": 27759,
"s": 27685,
"text": "After performing the above steps, print the value of count as the answer."
},
{
"code": null,
"e": 27810,
"s": 27759,
"text": "Below is the implementation of the above approach."
},
{
"code": null,
"e": 27816,
"s": 27810,
"text": "C++14"
},
{
"code": null,
"e": 27821,
"s": 27816,
"text": "Java"
},
{
"code": null,
"e": 27829,
"s": 27821,
"text": "Python3"
},
{
"code": null,
"e": 27832,
"s": 27829,
"text": "C#"
},
{
"code": null,
"e": 27843,
"s": 27832,
"text": "Javascript"
},
{
"code": "// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to count// the total number of pairsint countPairsWithProductK( int arr[], int n, int k) { int count = 0; // Initialize hashmap. unordered_map<int, int> mp; // Insert array elements to hashmap for (int i = 0; i < n; i++) { mp[arr[i]]++; } for (int i = 0; i < n; i++) { double index = 1.0 * k / arr[i]; // If k is not power of two if (index >= 0 && ((index - (int)(index)) == 0) && mp.find(k / arr[i]) != mp.end() && (index != arr[i])) { count += mp[arr[i]] * mp[index]; // After counting erase the element mp.erase(arr[i]); mp.erase(index); } // If k is power of 2 if (index >= 0 && ((index - (int)(index)) == 0) && mp.find(k / arr[i]) != mp.end() && (index == arr[i])) { // Pair count count += (mp[arr[i]] * (mp[arr[i]] - 1)) / 2; // After counting erase the element; mp.erase(arr[i]); } } return count;} // Driver Codeint main() { int arr[] = { 1, 2, 16, 4, 4, 4, 8 }; int N = sizeof(arr) / sizeof(arr[0]); int K = 16; cout << countPairsWithProductK(arr, N, K); return 0;}",
"e": 29227,
"s": 27843,
"text": null
},
{
"code": "// Java program for the above approachimport java.util.*; class GFG{ // Function to count // the total number of pairs static int countPairsWithProductK( int arr[], int n, int k) { int count = 0; // Initialize hashmap. HashMap<Integer,Integer> mp = new HashMap<Integer,Integer>(); // Insert array elements to hashmap for (int i = 0; i < n; i++) { if(mp.containsKey(arr[i])){ mp.put(arr[i], mp.get(arr[i])+1); }else{ mp.put(arr[i], 1); } } for (int i = 0; i < n; i++) { int index = (int) (1.0 * k / arr[i]); // If k is not power of two if (index >= 0 && ((index - (int)(index)) == 0) && mp.containsKey(k / arr[i]) && (index != arr[i])) { count += mp.get(arr[i]) * mp.get(index); // After counting erase the element mp.remove(arr[i]); mp.remove(index); } // If k is power of 2 if (index >= 0 && ((index - (int)(index)) == 0) && mp.containsKey(k / arr[i]) && (index == arr[i])) { // Pair count count += (mp.get(arr[i]) * (mp.get(arr[i]) - 1)) / 2; // After counting erase the element; mp.remove(arr[i]); } } return count; } // Driver Code public static void main(String[] args) { int arr[] = { 1, 2, 16, 4, 4, 4, 8 }; int N = arr.length; int K = 16; System.out.print(countPairsWithProductK(arr, N, K)); }} // This code is contributed by 29AjayKumar",
"e": 30743,
"s": 29227,
"text": null
},
{
"code": "# Python 3 program for the above approachfrom collections import defaultdict # Function to count# the total number of pairsdef countPairsWithProductK(arr, n, k): count = 0 # Initialize hashmap. mp = defaultdict(int) # Insert array elements to hashmap for i in range(n): mp[arr[i]] += 1 for i in range(n): index = 1.0 * k / arr[i] # If k is not power of two if (index >= 0 and ((index - (int)(index)) == 0) and (k / arr[i]) in mp and (index != arr[i])): count += mp[arr[i]] * mp[index] # After counting erase the element del mp[arr[i]] del mp[index] # If k is power of 2 if (index >= 0 and ((index - (int)(index)) == 0) and (k / arr[i]) in mp and (index == arr[i])): # Pair count count += ((mp[arr[i]] * (mp[arr[i]] - 1)) / 2) # After counting erase the element; del mp[arr[i]] return count # Driver Codeif __name__ == \"__main__\": arr = [1, 2, 16, 4, 4, 4, 8] N = len(arr) K = 16 print(int(countPairsWithProductK(arr, N, K))) # This code is contributed by ukasp.",
"e": 31986,
"s": 30743,
"text": null
},
{
"code": "// C# program for the above approachusing System;using System.Collections.Generic; public class GFG{ // Function to count // the total number of pairs static int countPairsWithProductK( int []arr, int n, int k) { int count = 0; // Initialize hashmap. Dictionary<int,int> mp = new Dictionary<int,int>(); // Insert array elements to hashmap for (int i = 0; i < n; i++) { if(mp.ContainsKey(arr[i])){ mp[arr[i]] = mp[arr[i]]+1; }else{ mp.Add(arr[i], 1); } } for (int i = 0; i < n; i++) { int index = (int) (1.0 * k / arr[i]); // If k is not power of two if (index >= 0 && ((index - (int)(index)) == 0) && mp.ContainsKey(k / arr[i]) && (index != arr[i])) { count += mp[arr[i]] * mp[index]; // After counting erase the element mp.Remove(arr[i]); mp.Remove(index); } // If k is power of 2 if (index >= 0 && ((index - (int)(index)) == 0) && mp.ContainsKey(k / arr[i]) && (index == arr[i])) { // Pair count count += (mp[arr[i]] * (mp[arr[i]] - 1)) / 2; // After counting erase the element; mp.Remove(arr[i]); } } return count; } // Driver Code public static void Main(String[] args) { int []arr = { 1, 2, 16, 4, 4, 4, 8 }; int N = arr.Length; int K = 16; Console.Write(countPairsWithProductK(arr, N, K)); }} // This code is contributed by Rajput-Ji",
"e": 33496,
"s": 31986,
"text": null
},
{
"code": "<script> // JavaScript code for the above approach // Function to count // the total number of pairs function countPairsWithProductK( arr, n, k) { let count = 0; // Initialize hashmap. let mp = new Map(); // Insert array elements to hashmap for (let i = 0; i < n; i++) { if (mp.has(arr[i])) { mp.set(arr[i], mp.get(arr[i]) + 1); } else { mp.set(arr[i], 1); } } for (let i = 0; i < n; i++) { let index = 1.0 * k / arr[i]; // If k is not power of two if (index >= 0 && ((index - Math.floor(index)) == 0) && mp.has(k / arr[i]) && (index != arr[i])) { count += mp.get(arr[i]) * mp.get(index); // After counting erase the element mp.delete(arr[i]); mp.delete(index); } // If k is power of 2 if (index >= 0 && ((index - Math.floor(index)) == 0) && mp.has(k / arr[i]) && (index == arr[i])) { // Pair count count += (mp.get(arr[i]) * (mp.get(arr[i]) - 1)) / 2; // After counting erase the element; mp.delete(arr[i]); } } return count; } // Driver Code let arr = [1, 2, 16, 4, 4, 4, 8]; let N = arr.length; let K = 16; document.write(countPairsWithProductK(arr, N, K)); // This code is contributed by Potta Lokesh </script>",
"e": 35229,
"s": 33496,
"text": null
},
{
"code": null,
"e": 35234,
"s": 35232,
"text": "5"
},
{
"code": null,
"e": 35279,
"s": 35236,
"text": "Time Complexity: O(N)Auxiliary Space: O(N)"
},
{
"code": null,
"e": 35295,
"s": 35281,
"text": "lokeshpotta20"
},
{
"code": null,
"e": 35301,
"s": 35295,
"text": "ukasp"
},
{
"code": null,
"e": 35313,
"s": 35301,
"text": "29AjayKumar"
},
{
"code": null,
"e": 35323,
"s": 35313,
"text": "Rajput-Ji"
},
{
"code": null,
"e": 35342,
"s": 35323,
"text": "frequency-counting"
},
{
"code": null,
"e": 35349,
"s": 35342,
"text": "Arrays"
},
{
"code": null,
"e": 35363,
"s": 35349,
"text": "Combinatorial"
},
{
"code": null,
"e": 35368,
"s": 35363,
"text": "Hash"
},
{
"code": null,
"e": 35381,
"s": 35368,
"text": "Mathematical"
},
{
"code": null,
"e": 35388,
"s": 35381,
"text": "Arrays"
},
{
"code": null,
"e": 35393,
"s": 35388,
"text": "Hash"
},
{
"code": null,
"e": 35406,
"s": 35393,
"text": "Mathematical"
},
{
"code": null,
"e": 35420,
"s": 35406,
"text": "Combinatorial"
},
{
"code": null,
"e": 35518,
"s": 35420,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 35545,
"s": 35518,
"text": "Count pairs with given sum"
},
{
"code": null,
"e": 35576,
"s": 35545,
"text": "Chocolate Distribution Problem"
},
{
"code": null,
"e": 35601,
"s": 35576,
"text": "Window Sliding Technique"
},
{
"code": null,
"e": 35639,
"s": 35601,
"text": "Reversal algorithm for array rotation"
},
{
"code": null,
"e": 35660,
"s": 35639,
"text": "Next Greater Element"
},
{
"code": null,
"e": 35720,
"s": 35660,
"text": "Write a program to print all permutations of a given string"
},
{
"code": null,
"e": 35758,
"s": 35720,
"text": "Permutation and Combination in Python"
},
{
"code": null,
"e": 35835,
"s": 35758,
"text": "itertools.combinations() module in Python to print all possible combinations"
},
{
"code": null,
"e": 35853,
"s": 35835,
"text": "Combinational Sum"
}
] |
Calculate exponential of a value in Julia - exp(), exp10(), exp2(), expm1() and frexp() Methods - GeeksforGeeks
|
26 Mar, 2020
The exp() is an inbuilt function in julia which is used to calculate the natural base exponential of the specified number.
Syntax: exp(x)
Parameters:
x: Specified values.
Returns: It returns the calculated natural base exponential of the specified number.
Example:
# Julia program to illustrate # the use of exp() method # Getting the natural base exponential# of the specified number.println(exp(0))println(exp(1))println(exp(5))println(exp(-1))
Output:
1.0
2.718281828459045
148.4131591025766
0.36787944117144233
The exp10() is an inbuilt function in julia which is used to calculate the base 10 exponential of the specified number.
Syntax: exp10(x)
Parameters:
x: Specified values.
Returns: It returns the calculated base 10 exponential of the specified number.
Example:
# Julia program to illustrate # the use of exp10() method # Getting the base 10 exponential# of the specified number.println(exp10(0))println(exp10(1))println(exp10(10))println(exp10(-1))
Output:
1.0
10.0
1.0e10
0.1
The exp2() is an inbuilt function in julia which is used to calculate the base 2 exponential of the specified number.
Syntax: exp2(x)
Parameters:
x: Specified values.
Returns: It returns the calculated base 2 exponential of the specified number.
Example:
# Julia program to illustrate # the use of exp2() method # Getting the base 2 exponential# of the specified number.println(exp2(0))println(exp2(1))println(exp2(2))println(exp2(-1))
Output:
1.0
2.0
4.0
0.5
The expm1() is an inbuilt function in julia which is used to accurately calculate .
Syntax: expm1(x)
Parameters:
x: Specified values.
Returns: It returns the calculated value of .
Example:
# Julia program to illustrate # the use of expm1() method # Getting the accurate value# of given expressionprintln(expm1(0))println(expm1(1))println(expm1(2))println(expm1(-1))
Output:
0.0
1.718281828459045
6.38905609893065
-0.6321205588285577
The frexp() is an inbuilt function in julia which is used to return (x, exp), where x is given and having a magnitude in the interval [1/2, 1) or 0.
Syntax: frexp(x)
Parameters:
x: Specified values in the interval [1/2, 1) or 0.
Returns: It returns (x, exp), where x is given and having a magnitude in the interval [1/2, 1) or 0.
Example:
# Julia program to illustrate # the use of frexp() method # Getting (x, exp), where# x is given and having a magnitude# in the interval [1 / 2, 1) or 0.println(frexp(0.6))println(frexp(0.5))println(frexp(0.7))println(frexp(0.9999))
Output:
(0.6, 0)
(0.5, 0)
(0.7, 0)
(0.9999, 0)
Julia
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Decision Making in Julia (if, if-else, Nested-if, if-elseif-else ladder)
Get array dimensions and size of a dimension in Julia - size() Method
Exception handling in Julia
Searching in Array for a given element in Julia
Get number of elements of array in Julia - length() Method
Find maximum element along with its index in Julia - findmax() Method
Join an array of strings into a single string in Julia - join() Method
Working with Excel Files in Julia
File Handling in Julia
Getting last element of an array in Julia - last() Method
|
[
{
"code": null,
"e": 25789,
"s": 25761,
"text": "\n26 Mar, 2020"
},
{
"code": null,
"e": 25912,
"s": 25789,
"text": "The exp() is an inbuilt function in julia which is used to calculate the natural base exponential of the specified number."
},
{
"code": null,
"e": 25927,
"s": 25912,
"text": "Syntax: exp(x)"
},
{
"code": null,
"e": 25939,
"s": 25927,
"text": "Parameters:"
},
{
"code": null,
"e": 25960,
"s": 25939,
"text": "x: Specified values."
},
{
"code": null,
"e": 26045,
"s": 25960,
"text": "Returns: It returns the calculated natural base exponential of the specified number."
},
{
"code": null,
"e": 26054,
"s": 26045,
"text": "Example:"
},
{
"code": "# Julia program to illustrate # the use of exp() method # Getting the natural base exponential# of the specified number.println(exp(0))println(exp(1))println(exp(5))println(exp(-1))",
"e": 26237,
"s": 26054,
"text": null
},
{
"code": null,
"e": 26245,
"s": 26237,
"text": "Output:"
},
{
"code": null,
"e": 26306,
"s": 26245,
"text": "1.0\n2.718281828459045\n148.4131591025766\n0.36787944117144233\n"
},
{
"code": null,
"e": 26426,
"s": 26306,
"text": "The exp10() is an inbuilt function in julia which is used to calculate the base 10 exponential of the specified number."
},
{
"code": null,
"e": 26443,
"s": 26426,
"text": "Syntax: exp10(x)"
},
{
"code": null,
"e": 26455,
"s": 26443,
"text": "Parameters:"
},
{
"code": null,
"e": 26476,
"s": 26455,
"text": "x: Specified values."
},
{
"code": null,
"e": 26556,
"s": 26476,
"text": "Returns: It returns the calculated base 10 exponential of the specified number."
},
{
"code": null,
"e": 26565,
"s": 26556,
"text": "Example:"
},
{
"code": "# Julia program to illustrate # the use of exp10() method # Getting the base 10 exponential# of the specified number.println(exp10(0))println(exp10(1))println(exp10(10))println(exp10(-1))",
"e": 26754,
"s": 26565,
"text": null
},
{
"code": null,
"e": 26762,
"s": 26754,
"text": "Output:"
},
{
"code": null,
"e": 26783,
"s": 26762,
"text": "1.0\n10.0\n1.0e10\n0.1\n"
},
{
"code": null,
"e": 26901,
"s": 26783,
"text": "The exp2() is an inbuilt function in julia which is used to calculate the base 2 exponential of the specified number."
},
{
"code": null,
"e": 26917,
"s": 26901,
"text": "Syntax: exp2(x)"
},
{
"code": null,
"e": 26929,
"s": 26917,
"text": "Parameters:"
},
{
"code": null,
"e": 26950,
"s": 26929,
"text": "x: Specified values."
},
{
"code": null,
"e": 27029,
"s": 26950,
"text": "Returns: It returns the calculated base 2 exponential of the specified number."
},
{
"code": null,
"e": 27038,
"s": 27029,
"text": "Example:"
},
{
"code": "# Julia program to illustrate # the use of exp2() method # Getting the base 2 exponential# of the specified number.println(exp2(0))println(exp2(1))println(exp2(2))println(exp2(-1))",
"e": 27220,
"s": 27038,
"text": null
},
{
"code": null,
"e": 27228,
"s": 27220,
"text": "Output:"
},
{
"code": null,
"e": 27245,
"s": 27228,
"text": "1.0\n2.0\n4.0\n0.5\n"
},
{
"code": null,
"e": 27329,
"s": 27245,
"text": "The expm1() is an inbuilt function in julia which is used to accurately calculate ."
},
{
"code": null,
"e": 27346,
"s": 27329,
"text": "Syntax: expm1(x)"
},
{
"code": null,
"e": 27358,
"s": 27346,
"text": "Parameters:"
},
{
"code": null,
"e": 27379,
"s": 27358,
"text": "x: Specified values."
},
{
"code": null,
"e": 27425,
"s": 27379,
"text": "Returns: It returns the calculated value of ."
},
{
"code": null,
"e": 27434,
"s": 27425,
"text": "Example:"
},
{
"code": "# Julia program to illustrate # the use of expm1() method # Getting the accurate value# of given expressionprintln(expm1(0))println(expm1(1))println(expm1(2))println(expm1(-1))",
"e": 27612,
"s": 27434,
"text": null
},
{
"code": null,
"e": 27620,
"s": 27612,
"text": "Output:"
},
{
"code": null,
"e": 27680,
"s": 27620,
"text": "0.0\n1.718281828459045\n6.38905609893065\n-0.6321205588285577\n"
},
{
"code": null,
"e": 27829,
"s": 27680,
"text": "The frexp() is an inbuilt function in julia which is used to return (x, exp), where x is given and having a magnitude in the interval [1/2, 1) or 0."
},
{
"code": null,
"e": 27846,
"s": 27829,
"text": "Syntax: frexp(x)"
},
{
"code": null,
"e": 27858,
"s": 27846,
"text": "Parameters:"
},
{
"code": null,
"e": 27909,
"s": 27858,
"text": "x: Specified values in the interval [1/2, 1) or 0."
},
{
"code": null,
"e": 28010,
"s": 27909,
"text": "Returns: It returns (x, exp), where x is given and having a magnitude in the interval [1/2, 1) or 0."
},
{
"code": null,
"e": 28019,
"s": 28010,
"text": "Example:"
},
{
"code": "# Julia program to illustrate # the use of frexp() method # Getting (x, exp), where# x is given and having a magnitude# in the interval [1 / 2, 1) or 0.println(frexp(0.6))println(frexp(0.5))println(frexp(0.7))println(frexp(0.9999))",
"e": 28252,
"s": 28019,
"text": null
},
{
"code": null,
"e": 28260,
"s": 28252,
"text": "Output:"
},
{
"code": null,
"e": 28300,
"s": 28260,
"text": "(0.6, 0)\n(0.5, 0)\n(0.7, 0)\n(0.9999, 0)\n"
},
{
"code": null,
"e": 28306,
"s": 28300,
"text": "Julia"
},
{
"code": null,
"e": 28404,
"s": 28306,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28477,
"s": 28404,
"text": "Decision Making in Julia (if, if-else, Nested-if, if-elseif-else ladder)"
},
{
"code": null,
"e": 28547,
"s": 28477,
"text": "Get array dimensions and size of a dimension in Julia - size() Method"
},
{
"code": null,
"e": 28575,
"s": 28547,
"text": "Exception handling in Julia"
},
{
"code": null,
"e": 28623,
"s": 28575,
"text": "Searching in Array for a given element in Julia"
},
{
"code": null,
"e": 28682,
"s": 28623,
"text": "Get number of elements of array in Julia - length() Method"
},
{
"code": null,
"e": 28752,
"s": 28682,
"text": "Find maximum element along with its index in Julia - findmax() Method"
},
{
"code": null,
"e": 28823,
"s": 28752,
"text": "Join an array of strings into a single string in Julia - join() Method"
},
{
"code": null,
"e": 28857,
"s": 28823,
"text": "Working with Excel Files in Julia"
},
{
"code": null,
"e": 28880,
"s": 28857,
"text": "File Handling in Julia"
}
] |
DNA to Protein in Python 3 - GeeksforGeeks
|
25 Aug, 2021
Translation Theory : DNA β RNA β Protein
Life depends on the ability of cells to store, retrieve, and translate genetic instructions.These instructions are needed to make and maintain living organisms. For a long time, it was not clear what molecules were able to copy and transmit genetic information. We now know that this information is carried by the dioxyribonucleic acid or DNA in all living things. DNA: DNA is a discrete code physically present in almost every cell of an organism. We can think of DNA as a one dimensional string of characters with four characters to choose from. These characters are A, C, G, and T. They stand for the first letters with the four nucleotides used to construct DNA. The full names of these nucleotides are Adenine, Cytosine, Guanine, and Thymine. Each unique three character sequence of nucleotides, sometimes called a nucleotide triplet, corresponds to one amino acid. The sequence of amino acids is unique for each type of protein and all proteins are built from the same set of just 20 amino acids for all living things.
Instructions in the DNA are first transcribed into RNA and the RNA is then translated into proteins. We can think of DNA, when read as sequences of three letters, as a dictionary of life. Aim: Convert a given sequence of DNA into its Protein equivalent. Source: Download a DNA strand as a text file from a public web-based repository of DNA sequences from NCBI.The Nucleotide sample is ( NM_207618.2 ), which can be found here.To download the file :
YouTubeAmartya R Saikia273 subscribersTranslation : DNA to ProteinWatch laterShareCopy linkInfoShoppingTap to unmuteIf playback doesn't begin shortly, try restarting your device.You're signed outVideos you watch may be added to the TV's watch history and influence TV recommendations. To avoid this, cancel and sign in to YouTube on your computer.CancelConfirmMore videosMore videosSwitch cameraShareInclude playlistAn error occurred while retrieving sharing information. Please try again later.Watch on0:000:000:00 / 1:14β’Liveβ’<div class="player-unavailable"><h1 class="message">An error occurred.</h1><div class="submessage"><a href="https://www.youtube.com/watch?v=VtP_180hJio" target="_blank">Try watching this video on www.youtube.com</a>, or enable JavaScript if it is disabled in your browser.</div></div>
Steps: Required steps to convert DNA sequence to a sequence of Amino acids are :
1. Code to translate the DNA sequence to a sequence of Amino acids where each Amino acid is
represented by a unique letter.
2. Download the Amino acid sequence from NCBI to check our solution.
Coding Translation
The very first step is to put the original unaltered DNA sequence text file into the working path directory.Check your working path directory in the Python shell,
>>>pwd
Next, we need to open the file in Python and read it. By default, the text file contains some unformatted hidden characters. These hidden characters such as β/nβ or β/rβ needs to be formatted and removed. So we use replace() function and get the altered DNA sequence txt file from the Original txt file.
Python
inputfile ="DNA_sequence_original.txt"f = open(inputfile, "r")seq = f.read() seq = seq.replace("\n", "")seq = seq.replace("\r", "")
Next, we will build a function called translate() which will convert the altered DNA sequence into its Protein equivalent and return it. We will feed the altered DNA sequence as a parameter to the function.
Python
def translate(seq): table = { 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_', 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W', } protein ="" if len(seq)%3 == 0: for i in range(0, len(seq), 3): codon = seq[i:i + 3] protein+= table[codon] return protein
The table in the code above is for reference and can be found in biology manuals. Since triplet nucleotide called the codon forms a single amino acid, so we check if the altered DNA sequence is divisible by 3 in ( if len(seq)%3 == 0: ). Next, the code is self explanatory where we form codons and match them with the Amino acids in the table. Atlast, we form the Amino acid sequence also called the Protein and return it.
The last step is to match our Amino Acid sequence with that to the original one found on the NCBI website. We will compare both of the Amino acid sequences in Python, character by character and return true if both are exactly the same copy. First download the unaltered amino acid sequence txt file and open it in Python. We will build a function called read_seq() to remove the unwanted characters and form the altered amino acidβs sequence txt file.
Python
def read_seq(inputfile): with open(inputfile, "r") as f: seq = f.read() seq = seq.replace("\n", "") seq = seq.replace("\r", "") return seq
The last step is to compare both the files and check if both are the same.If the output is true, we have succeeded in translating DNA to Protein.
Final Code
Python
# Python program to convert# altered DNA to protein inputfile ="DNA_sequence_original.txt"f = open(inputfile, "r")seq = f.read() seq = seq.replace("\n", "")seq = seq.replace("\r", "") def translate(seq): table = { 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_', 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W', } protein ="" if len(seq)%3 == 0: for i in range(0, len(seq), 3): codon = seq[i:i + 3] protein+= table[codon] return proteindef read_seq(inputfile): with open(inputfile, "r") as f: seq = f.read() seq = seq.replace("\n", "") seq = seq.replace("\r", "") return seq prt = read_seq("amino_acid_sequence_original.txt")dna = read_seq("DNA_sequence_original.txt") p = translate(dna[20:935])p == prt
Output : True
Reference :
edX β HarvardX β Using Python for Research
This article is contributed by Amartya Ranjan Saikia. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
arorakashish0911
GBlog
Project
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
DSA Sheet by Love Babbar
How to Start Learning DSA?
Introduction to Recurrent Neural Network
12 pip Commands For Python Developers
A Freshers Guide To Programming
SDE SHEET - A Complete Guide for SDE Preparation
Working with zip files in Python
Python | Simple GUI calculator using Tkinter
Working with Images in Python
Simple Chat Room using Python
|
[
{
"code": null,
"e": 26297,
"s": 26269,
"text": "\n25 Aug, 2021"
},
{
"code": null,
"e": 26338,
"s": 26297,
"text": "Translation Theory : DNA β RNA β Protein"
},
{
"code": null,
"e": 27364,
"s": 26338,
"text": "Life depends on the ability of cells to store, retrieve, and translate genetic instructions.These instructions are needed to make and maintain living organisms. For a long time, it was not clear what molecules were able to copy and transmit genetic information. We now know that this information is carried by the dioxyribonucleic acid or DNA in all living things. DNA: DNA is a discrete code physically present in almost every cell of an organism. We can think of DNA as a one dimensional string of characters with four characters to choose from. These characters are A, C, G, and T. They stand for the first letters with the four nucleotides used to construct DNA. The full names of these nucleotides are Adenine, Cytosine, Guanine, and Thymine. Each unique three character sequence of nucleotides, sometimes called a nucleotide triplet, corresponds to one amino acid. The sequence of amino acids is unique for each type of protein and all proteins are built from the same set of just 20 amino acids for all living things. "
},
{
"code": null,
"e": 27815,
"s": 27364,
"text": "Instructions in the DNA are first transcribed into RNA and the RNA is then translated into proteins. We can think of DNA, when read as sequences of three letters, as a dictionary of life. Aim: Convert a given sequence of DNA into its Protein equivalent. Source: Download a DNA strand as a text file from a public web-based repository of DNA sequences from NCBI.The Nucleotide sample is ( NM_207618.2 ), which can be found here.To download the file : "
},
{
"code": null,
"e": 28628,
"s": 27815,
"text": "YouTubeAmartya R Saikia273 subscribersTranslation : DNA to ProteinWatch laterShareCopy linkInfoShoppingTap to unmuteIf playback doesn't begin shortly, try restarting your device.You're signed outVideos you watch may be added to the TV's watch history and influence TV recommendations. To avoid this, cancel and sign in to YouTube on your computer.CancelConfirmMore videosMore videosSwitch cameraShareInclude playlistAn error occurred while retrieving sharing information. Please try again later.Watch on0:000:000:00 / 1:14β’Liveβ’<div class=\"player-unavailable\"><h1 class=\"message\">An error occurred.</h1><div class=\"submessage\"><a href=\"https://www.youtube.com/watch?v=VtP_180hJio\" target=\"_blank\">Try watching this video on www.youtube.com</a>, or enable JavaScript if it is disabled in your browser.</div></div>"
},
{
"code": null,
"e": 28710,
"s": 28628,
"text": "Steps: Required steps to convert DNA sequence to a sequence of Amino acids are : "
},
{
"code": null,
"e": 28906,
"s": 28710,
"text": "1. Code to translate the DNA sequence to a sequence of Amino acids where each Amino acid is\n represented by a unique letter.\n2. Download the Amino acid sequence from NCBI to check our solution."
},
{
"code": null,
"e": 28925,
"s": 28906,
"text": "Coding Translation"
},
{
"code": null,
"e": 29089,
"s": 28925,
"text": "The very first step is to put the original unaltered DNA sequence text file into the working path directory.Check your working path directory in the Python shell, "
},
{
"code": null,
"e": 29096,
"s": 29089,
"text": ">>>pwd"
},
{
"code": null,
"e": 29401,
"s": 29096,
"text": "Next, we need to open the file in Python and read it. By default, the text file contains some unformatted hidden characters. These hidden characters such as β/nβ or β/rβ needs to be formatted and removed. So we use replace() function and get the altered DNA sequence txt file from the Original txt file. "
},
{
"code": null,
"e": 29408,
"s": 29401,
"text": "Python"
},
{
"code": "inputfile =\"DNA_sequence_original.txt\"f = open(inputfile, \"r\")seq = f.read() seq = seq.replace(\"\\n\", \"\")seq = seq.replace(\"\\r\", \"\")",
"e": 29541,
"s": 29408,
"text": null
},
{
"code": null,
"e": 29749,
"s": 29541,
"text": "Next, we will build a function called translate() which will convert the altered DNA sequence into its Protein equivalent and return it. We will feed the altered DNA sequence as a parameter to the function. "
},
{
"code": null,
"e": 29756,
"s": 29749,
"text": "Python"
},
{
"code": "def translate(seq): table = { 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_', 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W', } protein =\"\" if len(seq)%3 == 0: for i in range(0, len(seq), 3): codon = seq[i:i + 3] protein+= table[codon] return protein",
"e": 30793,
"s": 29756,
"text": null
},
{
"code": null,
"e": 31216,
"s": 30793,
"text": "The table in the code above is for reference and can be found in biology manuals. Since triplet nucleotide called the codon forms a single amino acid, so we check if the altered DNA sequence is divisible by 3 in ( if len(seq)%3 == 0: ). Next, the code is self explanatory where we form codons and match them with the Amino acids in the table. Atlast, we form the Amino acid sequence also called the Protein and return it. "
},
{
"code": null,
"e": 31669,
"s": 31216,
"text": "The last step is to match our Amino Acid sequence with that to the original one found on the NCBI website. We will compare both of the Amino acid sequences in Python, character by character and return true if both are exactly the same copy. First download the unaltered amino acid sequence txt file and open it in Python. We will build a function called read_seq() to remove the unwanted characters and form the altered amino acidβs sequence txt file. "
},
{
"code": null,
"e": 31676,
"s": 31669,
"text": "Python"
},
{
"code": "def read_seq(inputfile): with open(inputfile, \"r\") as f: seq = f.read() seq = seq.replace(\"\\n\", \"\") seq = seq.replace(\"\\r\", \"\") return seq",
"e": 31834,
"s": 31676,
"text": null
},
{
"code": null,
"e": 31981,
"s": 31834,
"text": "The last step is to compare both the files and check if both are the same.If the output is true, we have succeeded in translating DNA to Protein. "
},
{
"code": null,
"e": 31993,
"s": 31981,
"text": "Final Code "
},
{
"code": null,
"e": 32000,
"s": 31993,
"text": "Python"
},
{
"code": "# Python program to convert# altered DNA to protein inputfile =\"DNA_sequence_original.txt\"f = open(inputfile, \"r\")seq = f.read() seq = seq.replace(\"\\n\", \"\")seq = seq.replace(\"\\r\", \"\") def translate(seq): table = { 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_', 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W', } protein =\"\" if len(seq)%3 == 0: for i in range(0, len(seq), 3): codon = seq[i:i + 3] protein+= table[codon] return proteindef read_seq(inputfile): with open(inputfile, \"r\") as f: seq = f.read() seq = seq.replace(\"\\n\", \"\") seq = seq.replace(\"\\r\", \"\") return seq prt = read_seq(\"amino_acid_sequence_original.txt\")dna = read_seq(\"DNA_sequence_original.txt\") p = translate(dna[20:935])p == prt",
"e": 33507,
"s": 32000,
"text": null
},
{
"code": null,
"e": 33521,
"s": 33507,
"text": "Output : True"
},
{
"code": null,
"e": 33534,
"s": 33521,
"text": "Reference : "
},
{
"code": null,
"e": 33577,
"s": 33534,
"text": "edX β HarvardX β Using Python for Research"
},
{
"code": null,
"e": 34007,
"s": 33577,
"text": "This article is contributed by Amartya Ranjan Saikia. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. "
},
{
"code": null,
"e": 34024,
"s": 34007,
"text": "arorakashish0911"
},
{
"code": null,
"e": 34030,
"s": 34024,
"text": "GBlog"
},
{
"code": null,
"e": 34038,
"s": 34030,
"text": "Project"
},
{
"code": null,
"e": 34136,
"s": 34038,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 34161,
"s": 34136,
"text": "DSA Sheet by Love Babbar"
},
{
"code": null,
"e": 34188,
"s": 34161,
"text": "How to Start Learning DSA?"
},
{
"code": null,
"e": 34229,
"s": 34188,
"text": "Introduction to Recurrent Neural Network"
},
{
"code": null,
"e": 34267,
"s": 34229,
"text": "12 pip Commands For Python Developers"
},
{
"code": null,
"e": 34299,
"s": 34267,
"text": "A Freshers Guide To Programming"
},
{
"code": null,
"e": 34348,
"s": 34299,
"text": "SDE SHEET - A Complete Guide for SDE Preparation"
},
{
"code": null,
"e": 34381,
"s": 34348,
"text": "Working with zip files in Python"
},
{
"code": null,
"e": 34426,
"s": 34381,
"text": "Python | Simple GUI calculator using Tkinter"
},
{
"code": null,
"e": 34456,
"s": 34426,
"text": "Working with Images in Python"
}
] |
HTML | Canvas Draw Bezier Curve - GeeksforGeeks
|
27 Feb, 2019
Curves on HTML canvas can be drawn using arcs, but drawing a complex diagram using arcs is quite a tedious task. In the given circumstance, Bezier curve will be very useful in providing more flexibility in drawing curves. Bezier curves on HTML canvas are drawn using a start point, one or more control point/points and an endpoint. Example: In the case of drawing a landscape, real-world objects, irregular shapes etc.Bezier Curves can be drawn in two ways:
Quadratic Bezier Curve
Cubic Bezier Curve
Quadratic Bezier Curve: This curve is controlled by one control point.
Syntax:
moveTo(start_pnt_X, start_pnt_Y);
context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_y);
Example: This example create a curve using quadratic bezier curve.
<!DOCTYPE html> <html> <head> <title> Quadratic Bezier Curve </title></head> <body> <h1>Quadratic Bezier Curve</h1> <canvas id="CanvasOfGeeks" width="400" height="200" style="border:solid 4px green"> <script> var c = document.getElementById("CanvasOfGeeks"); var context = c.getContext("2d"); var start_pnt_X = 50; var start_pnt_Y = 150; var cntrl_pnt_X = 200; var cntrl_pnt_Y = 30; var end_pnt_X = 350; var end_pnt_Y = 150; /* Start new path */ context.beginPath(); context.lineWidth=3; context.strokeText( ".", cntrl_pnt_X, cntrl_pnt_Y); /* Starting point of the curve */ context.moveTo(start_pnt_X, start_pnt_Y); context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_Y); /* drawing line on the canvas */ context.stroke(); </script> </body> </html>
Output:
Explanation:
Pre-requisite: HTML Canvas Basics
First Line: Reference for canvas object is stored in variable βcβ using DOM concept.Second Line: Without having drawing context of canvas nothing can be drawn on it.var c = document.getElementById("CanvasOfGeeks");
var context = c.getContext("2d");
var c = document.getElementById("CanvasOfGeeks");
var context = c.getContext("2d");
One can change the width of line by overriding the value of βlineWidthβ attribute of context object.context.lineWidth=3;
context.lineWidth=3;
For putting a dot over the coordinate of control point.You can see the dot in the figure shown above.context.strokeText( ".", cntrl_pnt_X, cntrl_pnt_Y);
context.strokeText( ".", cntrl_pnt_X, cntrl_pnt_Y);
This function is used to draw a curve from the start point mentioned in function.context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_Y);
context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_Y);
This function is used to move the context.context.moveTo(start_pnt_X, start_pnt_Y);
context.moveTo(start_pnt_X, start_pnt_Y);
Note: Please keep the control point within the canvas boundary.
Cubic Bezier Curve: This curve is controlled by two control points.
Syntax:
moveTo(start_pnt_X, start_pnt_Y);
contex.bezierCurveTo(cntrl_pnt_1_X, cntrl_pnt_1_Y, cntrl_pnt_2_X,
cntrl_pnt_2_Y, end_pnt_X, end_pnt_y);
Example: This example create a curve using cubic bezier curve.
<!DOCTYPE html> <html> <head> <title> Cubic Bezier Curve </title></head> <body> <h1>Cubic Bezier Curve</h1> <canvas id="CanvasOfGeeks" width="400" height="200" style="border:solid 4px green"> <script> var c = document.getElementById("CanvasOfGeeks"); var context = c.getContext("2d"); var start_pnt_X = 50; var start_pnt_Y = 100; var cntrl_pnt_1_X = 150; var cntrl_pnt_1_Y = 30; var cntrl_pnt_2_X = 250; var cntrl_pnt_2_Y = 170; var end_pnt_X = 350; var end_pnt_Y = 150; /* Start a new Path */ context.beginPath(); context.lineWidth=3; /* Representing first control point */ context.strokeText( ".", cntrl_pnt_1_X, cntrl_pnt_1_Y); /* Representing second control point */ context.strokeText( ".", cntrl_pnt_2_X, cntrl_pnt_2_Y); /* Starting point of the curve */ context.moveTo(start_pnt_X, start_pnt_Y); context.bezierCurveTo(cntrl_pnt_1_X, cntrl_pnt_1_Y, cntrl_pnt_2_X, cntrl_pnt_2_Y, end_pnt_X, end_pnt_Y); /* Drawing line on the canvas */ context.stroke(); </script> </body> </html>
Output:
Example: This example draw a fish using Bezier Curve.Input:
<!DOCTYPE html><html> <head> <title> Drawing a fish using Bezier Curve </title></head> <body> <canvas id="CanvasOfGeeks" width="400" height="200" style="border:solid 4px green"> <script> var c = document.getElementById("CanvasOfGeeks"); var context = c.getContext("2d"); /* Start a new Path */ context.beginPath(); context.lineWidth=3; /* Upper curve of the fish, from mouth to tail */ context.moveTo(60, 120); context.bezierCurveTo(90, 30, 200, 130, 310, 55); /* Lower curve of the fish, from mouth to tail */ context.moveTo(60, 120); context.bezierCurveTo(90, 170, 200, 110, 310, 160); /* Upper half of tail */ context.moveTo(310, 55); context.quadraticCurveTo(320, 80, 280, 110); /* lower half of tail */ context.moveTo(310, 160); context.quadraticCurveTo(320, 120, 280, 110); /* Eye of the fish */ context.moveTo(100, 100); context.arc(100, 100, 5, 0, 2*Math.PI); /* Mouth of the fish */ context.moveTo(60, 120); context.lineTo(80, 120); context.stroke(); </script> </body> </html>
Output:
HTML-Misc
HTML
JavaScript
Web Technologies
HTML
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Insert Form Data into Database using PHP ?
REST API (Introduction)
Types of CSS (Cascading Style Sheet)
HTML | <img> align Attribute
How to position a div at the bottom of its container using CSS?
Remove elements from a JavaScript Array
Convert a string to an integer in JavaScript
Difference between var, let and const keywords in JavaScript
Differences between Functional Components and Class Components in React
How to calculate the number of days between two dates in javascript?
|
[
{
"code": null,
"e": 25575,
"s": 25547,
"text": "\n27 Feb, 2019"
},
{
"code": null,
"e": 26033,
"s": 25575,
"text": "Curves on HTML canvas can be drawn using arcs, but drawing a complex diagram using arcs is quite a tedious task. In the given circumstance, Bezier curve will be very useful in providing more flexibility in drawing curves. Bezier curves on HTML canvas are drawn using a start point, one or more control point/points and an endpoint. Example: In the case of drawing a landscape, real-world objects, irregular shapes etc.Bezier Curves can be drawn in two ways:"
},
{
"code": null,
"e": 26056,
"s": 26033,
"text": "Quadratic Bezier Curve"
},
{
"code": null,
"e": 26075,
"s": 26056,
"text": "Cubic Bezier Curve"
},
{
"code": null,
"e": 26146,
"s": 26075,
"text": "Quadratic Bezier Curve: This curve is controlled by one control point."
},
{
"code": null,
"e": 26154,
"s": 26146,
"text": "Syntax:"
},
{
"code": null,
"e": 26263,
"s": 26154,
"text": "moveTo(start_pnt_X, start_pnt_Y);\ncontext.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_y);\n"
},
{
"code": null,
"e": 26330,
"s": 26263,
"text": "Example: This example create a curve using quadratic bezier curve."
},
{
"code": "<!DOCTYPE html> <html> <head> <title> Quadratic Bezier Curve </title></head> <body> <h1>Quadratic Bezier Curve</h1> <canvas id=\"CanvasOfGeeks\" width=\"400\" height=\"200\" style=\"border:solid 4px green\"> <script> var c = document.getElementById(\"CanvasOfGeeks\"); var context = c.getContext(\"2d\"); var start_pnt_X = 50; var start_pnt_Y = 150; var cntrl_pnt_X = 200; var cntrl_pnt_Y = 30; var end_pnt_X = 350; var end_pnt_Y = 150; /* Start new path */ context.beginPath(); context.lineWidth=3; context.strokeText( \".\", cntrl_pnt_X, cntrl_pnt_Y); /* Starting point of the curve */ context.moveTo(start_pnt_X, start_pnt_Y); context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_Y); /* drawing line on the canvas */ context.stroke(); </script> </body> </html> ",
"e": 27376,
"s": 26330,
"text": null
},
{
"code": null,
"e": 27384,
"s": 27376,
"text": "Output:"
},
{
"code": null,
"e": 27397,
"s": 27384,
"text": "Explanation:"
},
{
"code": null,
"e": 27431,
"s": 27397,
"text": "Pre-requisite: HTML Canvas Basics"
},
{
"code": null,
"e": 27689,
"s": 27431,
"text": "First Line: Reference for canvas object is stored in variable βcβ using DOM concept.Second Line: Without having drawing context of canvas nothing can be drawn on it.var c = document.getElementById(\"CanvasOfGeeks\"); \n var context = c.getContext(\"2d\");\n"
},
{
"code": null,
"e": 27782,
"s": 27689,
"text": "var c = document.getElementById(\"CanvasOfGeeks\"); \n var context = c.getContext(\"2d\");\n"
},
{
"code": null,
"e": 27903,
"s": 27782,
"text": "One can change the width of line by overriding the value of βlineWidthβ attribute of context object.context.lineWidth=3;"
},
{
"code": null,
"e": 27924,
"s": 27903,
"text": "context.lineWidth=3;"
},
{
"code": null,
"e": 28077,
"s": 27924,
"text": "For putting a dot over the coordinate of control point.You can see the dot in the figure shown above.context.strokeText( \".\", cntrl_pnt_X, cntrl_pnt_Y);"
},
{
"code": null,
"e": 28129,
"s": 28077,
"text": "context.strokeText( \".\", cntrl_pnt_X, cntrl_pnt_Y);"
},
{
"code": null,
"e": 28284,
"s": 28129,
"text": "This function is used to draw a curve from the start point mentioned in function.context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_Y);"
},
{
"code": null,
"e": 28358,
"s": 28284,
"text": "context.quadraticCurveTo(cntrl_pnt_X, cntrl_pnt_Y, end_pnt_X, end_pnt_Y);"
},
{
"code": null,
"e": 28442,
"s": 28358,
"text": "This function is used to move the context.context.moveTo(start_pnt_X, start_pnt_Y);"
},
{
"code": null,
"e": 28484,
"s": 28442,
"text": "context.moveTo(start_pnt_X, start_pnt_Y);"
},
{
"code": null,
"e": 28548,
"s": 28484,
"text": "Note: Please keep the control point within the canvas boundary."
},
{
"code": null,
"e": 28616,
"s": 28548,
"text": "Cubic Bezier Curve: This curve is controlled by two control points."
},
{
"code": null,
"e": 28624,
"s": 28616,
"text": "Syntax:"
},
{
"code": null,
"e": 28785,
"s": 28624,
"text": "moveTo(start_pnt_X, start_pnt_Y);\ncontex.bezierCurveTo(cntrl_pnt_1_X, cntrl_pnt_1_Y, cntrl_pnt_2_X,\n cntrl_pnt_2_Y, end_pnt_X, end_pnt_y);\n"
},
{
"code": null,
"e": 28848,
"s": 28785,
"text": "Example: This example create a curve using cubic bezier curve."
},
{
"code": "<!DOCTYPE html> <html> <head> <title> Cubic Bezier Curve </title></head> <body> <h1>Cubic Bezier Curve</h1> <canvas id=\"CanvasOfGeeks\" width=\"400\" height=\"200\" style=\"border:solid 4px green\"> <script> var c = document.getElementById(\"CanvasOfGeeks\"); var context = c.getContext(\"2d\"); var start_pnt_X = 50; var start_pnt_Y = 100; var cntrl_pnt_1_X = 150; var cntrl_pnt_1_Y = 30; var cntrl_pnt_2_X = 250; var cntrl_pnt_2_Y = 170; var end_pnt_X = 350; var end_pnt_Y = 150; /* Start a new Path */ context.beginPath(); context.lineWidth=3; /* Representing first control point */ context.strokeText( \".\", cntrl_pnt_1_X, cntrl_pnt_1_Y); /* Representing second control point */ context.strokeText( \".\", cntrl_pnt_2_X, cntrl_pnt_2_Y); /* Starting point of the curve */ context.moveTo(start_pnt_X, start_pnt_Y); context.bezierCurveTo(cntrl_pnt_1_X, cntrl_pnt_1_Y, cntrl_pnt_2_X, cntrl_pnt_2_Y, end_pnt_X, end_pnt_Y); /* Drawing line on the canvas */ context.stroke(); </script> </body> </html> ",
"e": 30159,
"s": 28848,
"text": null
},
{
"code": null,
"e": 30167,
"s": 30159,
"text": "Output:"
},
{
"code": null,
"e": 30227,
"s": 30167,
"text": "Example: This example draw a fish using Bezier Curve.Input:"
},
{
"code": "<!DOCTYPE html><html> <head> <title> Drawing a fish using Bezier Curve </title></head> <body> <canvas id=\"CanvasOfGeeks\" width=\"400\" height=\"200\" style=\"border:solid 4px green\"> <script> var c = document.getElementById(\"CanvasOfGeeks\"); var context = c.getContext(\"2d\"); /* Start a new Path */ context.beginPath(); context.lineWidth=3; /* Upper curve of the fish, from mouth to tail */ context.moveTo(60, 120); context.bezierCurveTo(90, 30, 200, 130, 310, 55); /* Lower curve of the fish, from mouth to tail */ context.moveTo(60, 120); context.bezierCurveTo(90, 170, 200, 110, 310, 160); /* Upper half of tail */ context.moveTo(310, 55); context.quadraticCurveTo(320, 80, 280, 110); /* lower half of tail */ context.moveTo(310, 160); context.quadraticCurveTo(320, 120, 280, 110); /* Eye of the fish */ context.moveTo(100, 100); context.arc(100, 100, 5, 0, 2*Math.PI); /* Mouth of the fish */ context.moveTo(60, 120); context.lineTo(80, 120); context.stroke(); </script> </body> </html> ",
"e": 31544,
"s": 30227,
"text": null
},
{
"code": null,
"e": 31552,
"s": 31544,
"text": "Output:"
},
{
"code": null,
"e": 31562,
"s": 31552,
"text": "HTML-Misc"
},
{
"code": null,
"e": 31567,
"s": 31562,
"text": "HTML"
},
{
"code": null,
"e": 31578,
"s": 31567,
"text": "JavaScript"
},
{
"code": null,
"e": 31595,
"s": 31578,
"text": "Web Technologies"
},
{
"code": null,
"e": 31600,
"s": 31595,
"text": "HTML"
},
{
"code": null,
"e": 31698,
"s": 31600,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 31748,
"s": 31698,
"text": "How to Insert Form Data into Database using PHP ?"
},
{
"code": null,
"e": 31772,
"s": 31748,
"text": "REST API (Introduction)"
},
{
"code": null,
"e": 31809,
"s": 31772,
"text": "Types of CSS (Cascading Style Sheet)"
},
{
"code": null,
"e": 31838,
"s": 31809,
"text": "HTML | <img> align Attribute"
},
{
"code": null,
"e": 31902,
"s": 31838,
"text": "How to position a div at the bottom of its container using CSS?"
},
{
"code": null,
"e": 31942,
"s": 31902,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 31987,
"s": 31942,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 32048,
"s": 31987,
"text": "Difference between var, let and const keywords in JavaScript"
},
{
"code": null,
"e": 32120,
"s": 32048,
"text": "Differences between Functional Components and Class Components in React"
}
] |
Matplotlib.axes.Axes.set() in Python - GeeksforGeeks
|
19 Apr, 2020
Matplotlib is a library in Python and it is numerical β mathematical extension for NumPy library. The Axes Class contains most of the figure elements: Axis, Tick, Line2D, Text, Polygon, etc., and sets the coordinate system. And the instances of Axes supports callbacks through a callbacks attribute.
The Axes.set() function in axes module of matplotlib library is a property batch setter. Pass kwargs to set properties.
Syntax: Axes.set(self, **kwargs)
Parameters: This method does not accepts any parameters other than **kwargs.
Below examples illustrate the matplotlib.axes.Axes.set() function in matplotlib.axes:
Example 1:
# Implementation of matplotlib functionimport matplotlibimport matplotlib.pyplot as pltimport numpy as np t = np.arange(0.0, 2, 0.001)s = 1 + np.sin(8 * np.pi * t)*0.4 fig, ax = plt.subplots()ax.plot(t, s) ax.set(xlabel ='X-Axis', ylabel ='Y-Axis', xlim =(0, 1.5), ylim =(0.5, 1.5), title ='matplotlib.axes.Axes.set()\ function Example')ax.grid() plt.show()
Output:
Example 2:
# Implementation of matplotlib functionimport numpy as npimport matplotlib.pyplot as pltnp.random.seed(19680801) fig, ax = plt.subplots() x, y, s, c = np.random.rand(4, 200)s *= 200 ax.scatter(x, y, s, c) ax.set(xlabel ='X-Axis', ylabel ='Y-Axis', xlim =(0, 0.5), ylim =(0, 0.5), title ='matplotlib.axes.Axes.set()\ function Example')ax.grid() plt.show()
Output:
Python-matplotlib
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Python Dictionary
Read a file line by line in Python
How to Install PIP on Windows ?
Enumerate() in Python
Different ways to create Pandas Dataframe
Iterate over a list in Python
Python String | replace()
*args and **kwargs in Python
Reading and Writing to text files in Python
Create a Pandas DataFrame from Lists
|
[
{
"code": null,
"e": 26023,
"s": 25995,
"text": "\n19 Apr, 2020"
},
{
"code": null,
"e": 26323,
"s": 26023,
"text": "Matplotlib is a library in Python and it is numerical β mathematical extension for NumPy library. The Axes Class contains most of the figure elements: Axis, Tick, Line2D, Text, Polygon, etc., and sets the coordinate system. And the instances of Axes supports callbacks through a callbacks attribute."
},
{
"code": null,
"e": 26443,
"s": 26323,
"text": "The Axes.set() function in axes module of matplotlib library is a property batch setter. Pass kwargs to set properties."
},
{
"code": null,
"e": 26476,
"s": 26443,
"text": "Syntax: Axes.set(self, **kwargs)"
},
{
"code": null,
"e": 26553,
"s": 26476,
"text": "Parameters: This method does not accepts any parameters other than **kwargs."
},
{
"code": null,
"e": 26639,
"s": 26553,
"text": "Below examples illustrate the matplotlib.axes.Axes.set() function in matplotlib.axes:"
},
{
"code": null,
"e": 26650,
"s": 26639,
"text": "Example 1:"
},
{
"code": "# Implementation of matplotlib functionimport matplotlibimport matplotlib.pyplot as pltimport numpy as np t = np.arange(0.0, 2, 0.001)s = 1 + np.sin(8 * np.pi * t)*0.4 fig, ax = plt.subplots()ax.plot(t, s) ax.set(xlabel ='X-Axis', ylabel ='Y-Axis', xlim =(0, 1.5), ylim =(0.5, 1.5), title ='matplotlib.axes.Axes.set()\\ function Example')ax.grid() plt.show()",
"e": 27028,
"s": 26650,
"text": null
},
{
"code": null,
"e": 27036,
"s": 27028,
"text": "Output:"
},
{
"code": null,
"e": 27047,
"s": 27036,
"text": "Example 2:"
},
{
"code": "# Implementation of matplotlib functionimport numpy as npimport matplotlib.pyplot as pltnp.random.seed(19680801) fig, ax = plt.subplots() x, y, s, c = np.random.rand(4, 200)s *= 200 ax.scatter(x, y, s, c) ax.set(xlabel ='X-Axis', ylabel ='Y-Axis', xlim =(0, 0.5), ylim =(0, 0.5), title ='matplotlib.axes.Axes.set()\\ function Example')ax.grid() plt.show()",
"e": 27419,
"s": 27047,
"text": null
},
{
"code": null,
"e": 27427,
"s": 27419,
"text": "Output:"
},
{
"code": null,
"e": 27445,
"s": 27427,
"text": "Python-matplotlib"
},
{
"code": null,
"e": 27452,
"s": 27445,
"text": "Python"
},
{
"code": null,
"e": 27550,
"s": 27452,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27568,
"s": 27550,
"text": "Python Dictionary"
},
{
"code": null,
"e": 27603,
"s": 27568,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 27635,
"s": 27603,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 27657,
"s": 27635,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 27699,
"s": 27657,
"text": "Different ways to create Pandas Dataframe"
},
{
"code": null,
"e": 27729,
"s": 27699,
"text": "Iterate over a list in Python"
},
{
"code": null,
"e": 27755,
"s": 27729,
"text": "Python String | replace()"
},
{
"code": null,
"e": 27784,
"s": 27755,
"text": "*args and **kwargs in Python"
},
{
"code": null,
"e": 27828,
"s": 27784,
"text": "Reading and Writing to text files in Python"
}
] |
Highlight Pandas DataFrame's specific columns using apply() - GeeksforGeeks
|
17 Aug, 2020
Let us see how to highlight specific columns of a Pandas DataFrame. We can do this using the apply() function of the Styler class.
Syntax : Styler.apply(func, axis = 0, subset = None, **kwargs)
Parameters :
func : function should take a Series or DataFrame (depending on-axis), and return an object with the same shape. Must return a DataFrame with identical index and column labels when axis = None.
axis : apply to each column (axis=0 or βindexβ) or to each row (axis=1 or βcolumnsβ) or to the entire DataFrame at once with axis = None
subset : valid indexer to limit data to before applying the function.
**kwargs : dict pass along to func.
Returns : Styler
Letβs understand with examples:
Example 1 :
# importing pandas as pd import pandas as pd # creating the dataframedf = pd.DataFrame({"A" : [14, 4, 5, 4, 1], "B" : [5, 2, 54, 3, 2], "C" : [20, 20, 7, 3, 8], "D" : [14, 3, 6, 2, 6], "E" : [23, 45, 64, 32, 23]}) print("Original DataFrame :")display(df) # function definitiondef highlight_cols(x): # copy df to new - original data is not changed df = x.copy() # select all values to green color df.loc[:, :] = 'background-color: green' # overwrite values grey color df[['B', 'C', 'E']] = 'background-color: grey' # return color df return df print("Highlighted DataFrame :")display(df.style.apply(highlight_cols, axis = None))
Output :
Example 2 :
# importing pandas as pd import pandas as pd # creating the dataframedf = pd.DataFrame({"Name" : ["Yash", "Ankit", "Rao"], "Age" : [5, 2, 54]}) print("Original DataFrame :")display(df) # function definitiondef highlight_cols(x): # copy df to new - original data is not changed df = x.copy() # select all values to yellow color df.loc[:, :] = 'background-color: yellow' # return color df return df print("Highlighted DataFrame :")display(df.style.apply(highlight_cols, axis = None))
Output :
Python pandas-dataFrame
Python-pandas
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Python Dictionary
Read a file line by line in Python
How to Install PIP on Windows ?
Enumerate() in Python
Iterate over a list in Python
Python String | replace()
*args and **kwargs in Python
Reading and Writing to text files in Python
Create a Pandas DataFrame from Lists
Convert integer to string in Python
|
[
{
"code": null,
"e": 26359,
"s": 26331,
"text": "\n17 Aug, 2020"
},
{
"code": null,
"e": 26490,
"s": 26359,
"text": "Let us see how to highlight specific columns of a Pandas DataFrame. We can do this using the apply() function of the Styler class."
},
{
"code": null,
"e": 26553,
"s": 26490,
"text": "Syntax : Styler.apply(func, axis = 0, subset = None, **kwargs)"
},
{
"code": null,
"e": 26566,
"s": 26553,
"text": "Parameters :"
},
{
"code": null,
"e": 26760,
"s": 26566,
"text": "func : function should take a Series or DataFrame (depending on-axis), and return an object with the same shape. Must return a DataFrame with identical index and column labels when axis = None."
},
{
"code": null,
"e": 26897,
"s": 26760,
"text": "axis : apply to each column (axis=0 or βindexβ) or to each row (axis=1 or βcolumnsβ) or to the entire DataFrame at once with axis = None"
},
{
"code": null,
"e": 26967,
"s": 26897,
"text": "subset : valid indexer to limit data to before applying the function."
},
{
"code": null,
"e": 27003,
"s": 26967,
"text": "**kwargs : dict pass along to func."
},
{
"code": null,
"e": 27020,
"s": 27003,
"text": "Returns : Styler"
},
{
"code": null,
"e": 27052,
"s": 27020,
"text": "Letβs understand with examples:"
},
{
"code": null,
"e": 27064,
"s": 27052,
"text": "Example 1 :"
},
{
"code": "# importing pandas as pd import pandas as pd # creating the dataframedf = pd.DataFrame({\"A\" : [14, 4, 5, 4, 1], \"B\" : [5, 2, 54, 3, 2], \"C\" : [20, 20, 7, 3, 8], \"D\" : [14, 3, 6, 2, 6], \"E\" : [23, 45, 64, 32, 23]}) print(\"Original DataFrame :\")display(df) # function definitiondef highlight_cols(x): # copy df to new - original data is not changed df = x.copy() # select all values to green color df.loc[:, :] = 'background-color: green' # overwrite values grey color df[['B', 'C', 'E']] = 'background-color: grey' # return color df return df print(\"Highlighted DataFrame :\")display(df.style.apply(highlight_cols, axis = None))",
"e": 27819,
"s": 27064,
"text": null
},
{
"code": null,
"e": 27828,
"s": 27819,
"text": "Output :"
},
{
"code": null,
"e": 27840,
"s": 27828,
"text": "Example 2 :"
},
{
"code": "# importing pandas as pd import pandas as pd # creating the dataframedf = pd.DataFrame({\"Name\" : [\"Yash\", \"Ankit\", \"Rao\"], \"Age\" : [5, 2, 54]}) print(\"Original DataFrame :\")display(df) # function definitiondef highlight_cols(x): # copy df to new - original data is not changed df = x.copy() # select all values to yellow color df.loc[:, :] = 'background-color: yellow' # return color df return df print(\"Highlighted DataFrame :\")display(df.style.apply(highlight_cols, axis = None))",
"e": 28383,
"s": 27840,
"text": null
},
{
"code": null,
"e": 28392,
"s": 28383,
"text": "Output :"
},
{
"code": null,
"e": 28416,
"s": 28392,
"text": "Python pandas-dataFrame"
},
{
"code": null,
"e": 28430,
"s": 28416,
"text": "Python-pandas"
},
{
"code": null,
"e": 28437,
"s": 28430,
"text": "Python"
},
{
"code": null,
"e": 28535,
"s": 28437,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28553,
"s": 28535,
"text": "Python Dictionary"
},
{
"code": null,
"e": 28588,
"s": 28553,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 28620,
"s": 28588,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 28642,
"s": 28620,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 28672,
"s": 28642,
"text": "Iterate over a list in Python"
},
{
"code": null,
"e": 28698,
"s": 28672,
"text": "Python String | replace()"
},
{
"code": null,
"e": 28727,
"s": 28698,
"text": "*args and **kwargs in Python"
},
{
"code": null,
"e": 28771,
"s": 28727,
"text": "Reading and Writing to text files in Python"
},
{
"code": null,
"e": 28808,
"s": 28771,
"text": "Create a Pandas DataFrame from Lists"
}
] |
Type conversion in Java with Examples - GeeksforGeeks
|
22 Nov, 2021
Java provides various data types just likely any other dynamic languages such as boolean, char, int, unsigned int, signed int, float, double, long, etc in total providing 7 types where every datatype acquires different space while storing in memory. When you assign a value of one data type to another, the two types might not be compatible with each other. If the data types are compatible, then Java will perform the conversion automatically known as Automatic Type Conversion, and if not then they need to be cast or converted explicitly. For example, assigning an int value to a long variable.
Widening conversion takes place when two data types are automatically converted. This happens when:
The two data types are compatible.
When we assign a value of a smaller data type to a bigger data type.
For Example, in java, the numeric data types are compatible with each other but no automatic conversion is supported from numeric type to char or boolean. Also, char and boolean are not compatible with each other.
Example:
Java
// Java Program to Illustrate Automatic Type Conversion // Main classclass GFG { // Main driver method public static void main(String[] args) { int i = 100; // Automatic type conversion // Integer to long type long l = i; // Automatic type conversion // long to float type float f = l; // Print and display commands System.out.println("Int value " + i); System.out.println("Long value " + l); System.out.println("Float value " + f); }}
Int value 100
Long value 100
Float value 100.0
If we want to assign a value of a larger data type to a smaller data type we perform explicit type casting or narrowing.
This is useful for incompatible data types where automatic conversion cannot be done.
Here, the target type specifies the desired type to convert the specified value to.
char and number are not compatible with each other. Letβs see when we try to convert one into another.
Java
// Java program to illustrate Incompatible data Type// for Explicit Type Conversion // Main classpublic class GFG { // Main driver method public static void main(String[] argv) { // Declaring character variable char ch = 'c'; // Declaringinteger variable int num = 88; // Trying to insert integer to character ch = num; }}
Output: An error will be generated
This error is generated as an integer variable takes 4 bytes while character datatype requires 2 bytes. We are trying to plot data from 4 bytes into 2 bytes which is not possible.
How to do Explicit Conversion?
Java
// Java program to Illustrate Explicit Type Conversion // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Double datatype double d = 100.04; // Explicit type casting by forcefully getting // data from long datatype to integer type long l = (long)d; // Explicit type casting int i = (int)l; // Print statements System.out.println("Double value " + d); // While printing we will see that // fractional part lost System.out.println("Long value " + l); // While printing we will see that // fractional part lost System.out.println("Int value " + i); }}
Double value 100.04
Long value 100
Int value 100
Note: While assigning value to byte type the fractional part is lost and is reduced to modulo 256(range of byte).
Example:
Java
// Java Program to Illustrate Conversion of// Integer and Double to Byte // Main classclass GFG { // Main driver method public static void main(String args[]) { // Declaring byte variable byte b; // Declaring and initializing integer and double int i = 257; double d = 323.142; // Display message System.out.println("Conversion of int to byte."); // i % 256 b = (byte)i; // Print commands System.out.println("i = " + i + " b = " + b); System.out.println( "\nConversion of double to byte."); // d % 256 b = (byte)d; // Print commands System.out.println("d = " + d + " b= " + b); }}
Conversion of int to byte.
i = 257 b = 1
Conversion of double to byte.
d = 323.142 b= 67
While evaluating expressions, the intermediate value may exceed the range of operands and hence the expression value will be promoted. Some conditions for type promotion are:
Java automatically promotes each byte, short, or char operand to int when evaluating an expression.If one operand is long, float or double the whole expression is promoted to long, float, or double respectively.
Java automatically promotes each byte, short, or char operand to int when evaluating an expression.
If one operand is long, float or double the whole expression is promoted to long, float, or double respectively.
Example:
Java
// Java program to Illustrate Type promotion in Expressions // Main classclass GFG { // Main driver method public static void main(String args[]) { // Declaring and initializing primitive types byte b = 42; char c = 'a'; short s = 1024; int i = 50000; float f = 5.67f; double d = .1234; // The Expression double result = (f * b) + (i / c) - (d * s); // Printing the result obtained after // all the promotions are done System.out.println("result = " + result); }}
result = 626.7784146484375
While evaluating expressions, the result is automatically updated to a larger data type of the operand. But if we store that result in any smaller data type it generates a compile-time error, due to which we need to typecast the result.
Example:
Java
// Java program to Illustrate Type Casting// in Integer to Byte // Main classclass GFG { // Main driver method public static void main(String args[]) { // Declaring byte array byte b = 50; // Type casting int to byte b = (byte)(b * 2); // Display value in byte System.out.println(b); }}
100
Note: In case of single operands the result gets converted to int and then it is typecast accordingly, as in the above example.
This article is contributed by Apoorva Singh. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
vrk-19
sg4ipiafwot258z3lh6xa2mjq2qtxd89f49zgt7g
solankimayank
saurabh1990aror
jaganeeshwarofficialcontact
Java
Java
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Stream In Java
Interfaces in Java
ArrayList in Java
Initialize an ArrayList in Java
Stack Class in Java
Multidimensional Arrays in Java
Singleton Class in Java
Set in Java
Multithreading in Java
Collections in Java
|
[
{
"code": null,
"e": 25539,
"s": 25511,
"text": "\n22 Nov, 2021"
},
{
"code": null,
"e": 26138,
"s": 25539,
"text": "Java provides various data types just likely any other dynamic languages such as boolean, char, int, unsigned int, signed int, float, double, long, etc in total providing 7 types where every datatype acquires different space while storing in memory. When you assign a value of one data type to another, the two types might not be compatible with each other. If the data types are compatible, then Java will perform the conversion automatically known as Automatic Type Conversion, and if not then they need to be cast or converted explicitly. For example, assigning an int value to a long variable. "
},
{
"code": null,
"e": 26240,
"s": 26138,
"text": "Widening conversion takes place when two data types are automatically converted. This happens when: "
},
{
"code": null,
"e": 26275,
"s": 26240,
"text": "The two data types are compatible."
},
{
"code": null,
"e": 26344,
"s": 26275,
"text": "When we assign a value of a smaller data type to a bigger data type."
},
{
"code": null,
"e": 26559,
"s": 26344,
"text": "For Example, in java, the numeric data types are compatible with each other but no automatic conversion is supported from numeric type to char or boolean. Also, char and boolean are not compatible with each other. "
},
{
"code": null,
"e": 26568,
"s": 26559,
"text": "Example:"
},
{
"code": null,
"e": 26573,
"s": 26568,
"text": "Java"
},
{
"code": "// Java Program to Illustrate Automatic Type Conversion // Main classclass GFG { // Main driver method public static void main(String[] args) { int i = 100; // Automatic type conversion // Integer to long type long l = i; // Automatic type conversion // long to float type float f = l; // Print and display commands System.out.println(\"Int value \" + i); System.out.println(\"Long value \" + l); System.out.println(\"Float value \" + f); }}",
"e": 27102,
"s": 26573,
"text": null
},
{
"code": null,
"e": 27149,
"s": 27102,
"text": "Int value 100\nLong value 100\nFloat value 100.0"
},
{
"code": null,
"e": 27272,
"s": 27149,
"text": "If we want to assign a value of a larger data type to a smaller data type we perform explicit type casting or narrowing. "
},
{
"code": null,
"e": 27358,
"s": 27272,
"text": "This is useful for incompatible data types where automatic conversion cannot be done."
},
{
"code": null,
"e": 27443,
"s": 27358,
"text": "Here, the target type specifies the desired type to convert the specified value to. "
},
{
"code": null,
"e": 27547,
"s": 27443,
"text": "char and number are not compatible with each other. Letβs see when we try to convert one into another. "
},
{
"code": null,
"e": 27552,
"s": 27547,
"text": "Java"
},
{
"code": "// Java program to illustrate Incompatible data Type// for Explicit Type Conversion // Main classpublic class GFG { // Main driver method public static void main(String[] argv) { // Declaring character variable char ch = 'c'; // Declaringinteger variable int num = 88; // Trying to insert integer to character ch = num; }}",
"e": 27931,
"s": 27552,
"text": null
},
{
"code": null,
"e": 27966,
"s": 27931,
"text": "Output: An error will be generated"
},
{
"code": null,
"e": 28146,
"s": 27966,
"text": "This error is generated as an integer variable takes 4 bytes while character datatype requires 2 bytes. We are trying to plot data from 4 bytes into 2 bytes which is not possible."
},
{
"code": null,
"e": 28178,
"s": 28146,
"text": "How to do Explicit Conversion? "
},
{
"code": null,
"e": 28183,
"s": 28178,
"text": "Java"
},
{
"code": "// Java program to Illustrate Explicit Type Conversion // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Double datatype double d = 100.04; // Explicit type casting by forcefully getting // data from long datatype to integer type long l = (long)d; // Explicit type casting int i = (int)l; // Print statements System.out.println(\"Double value \" + d); // While printing we will see that // fractional part lost System.out.println(\"Long value \" + l); // While printing we will see that // fractional part lost System.out.println(\"Int value \" + i); }}",
"e": 28903,
"s": 28183,
"text": null
},
{
"code": null,
"e": 28952,
"s": 28903,
"text": "Double value 100.04\nLong value 100\nInt value 100"
},
{
"code": null,
"e": 29067,
"s": 28952,
"text": "Note: While assigning value to byte type the fractional part is lost and is reduced to modulo 256(range of byte). "
},
{
"code": null,
"e": 29076,
"s": 29067,
"text": "Example:"
},
{
"code": null,
"e": 29081,
"s": 29076,
"text": "Java"
},
{
"code": "// Java Program to Illustrate Conversion of// Integer and Double to Byte // Main classclass GFG { // Main driver method public static void main(String args[]) { // Declaring byte variable byte b; // Declaring and initializing integer and double int i = 257; double d = 323.142; // Display message System.out.println(\"Conversion of int to byte.\"); // i % 256 b = (byte)i; // Print commands System.out.println(\"i = \" + i + \" b = \" + b); System.out.println( \"\\nConversion of double to byte.\"); // d % 256 b = (byte)d; // Print commands System.out.println(\"d = \" + d + \" b= \" + b); }}",
"e": 29804,
"s": 29081,
"text": null
},
{
"code": null,
"e": 29894,
"s": 29804,
"text": "Conversion of int to byte.\ni = 257 b = 1\n\nConversion of double to byte.\nd = 323.142 b= 67"
},
{
"code": null,
"e": 30071,
"s": 29894,
"text": "While evaluating expressions, the intermediate value may exceed the range of operands and hence the expression value will be promoted. Some conditions for type promotion are: "
},
{
"code": null,
"e": 30283,
"s": 30071,
"text": "Java automatically promotes each byte, short, or char operand to int when evaluating an expression.If one operand is long, float or double the whole expression is promoted to long, float, or double respectively."
},
{
"code": null,
"e": 30383,
"s": 30283,
"text": "Java automatically promotes each byte, short, or char operand to int when evaluating an expression."
},
{
"code": null,
"e": 30496,
"s": 30383,
"text": "If one operand is long, float or double the whole expression is promoted to long, float, or double respectively."
},
{
"code": null,
"e": 30505,
"s": 30496,
"text": "Example:"
},
{
"code": null,
"e": 30510,
"s": 30505,
"text": "Java"
},
{
"code": "// Java program to Illustrate Type promotion in Expressions // Main classclass GFG { // Main driver method public static void main(String args[]) { // Declaring and initializing primitive types byte b = 42; char c = 'a'; short s = 1024; int i = 50000; float f = 5.67f; double d = .1234; // The Expression double result = (f * b) + (i / c) - (d * s); // Printing the result obtained after // all the promotions are done System.out.println(\"result = \" + result); }}",
"e": 31074,
"s": 30510,
"text": null
},
{
"code": null,
"e": 31101,
"s": 31074,
"text": "result = 626.7784146484375"
},
{
"code": null,
"e": 31339,
"s": 31101,
"text": "While evaluating expressions, the result is automatically updated to a larger data type of the operand. But if we store that result in any smaller data type it generates a compile-time error, due to which we need to typecast the result. "
},
{
"code": null,
"e": 31348,
"s": 31339,
"text": "Example:"
},
{
"code": null,
"e": 31353,
"s": 31348,
"text": "Java"
},
{
"code": "// Java program to Illustrate Type Casting// in Integer to Byte // Main classclass GFG { // Main driver method public static void main(String args[]) { // Declaring byte array byte b = 50; // Type casting int to byte b = (byte)(b * 2); // Display value in byte System.out.println(b); }}",
"e": 31698,
"s": 31353,
"text": null
},
{
"code": null,
"e": 31702,
"s": 31698,
"text": "100"
},
{
"code": null,
"e": 31830,
"s": 31702,
"text": "Note: In case of single operands the result gets converted to int and then it is typecast accordingly, as in the above example."
},
{
"code": null,
"e": 32252,
"s": 31830,
"text": "This article is contributed by Apoorva Singh. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above."
},
{
"code": null,
"e": 32259,
"s": 32252,
"text": "vrk-19"
},
{
"code": null,
"e": 32300,
"s": 32259,
"text": "sg4ipiafwot258z3lh6xa2mjq2qtxd89f49zgt7g"
},
{
"code": null,
"e": 32314,
"s": 32300,
"text": "solankimayank"
},
{
"code": null,
"e": 32330,
"s": 32314,
"text": "saurabh1990aror"
},
{
"code": null,
"e": 32358,
"s": 32330,
"text": "jaganeeshwarofficialcontact"
},
{
"code": null,
"e": 32363,
"s": 32358,
"text": "Java"
},
{
"code": null,
"e": 32368,
"s": 32363,
"text": "Java"
},
{
"code": null,
"e": 32466,
"s": 32368,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 32481,
"s": 32466,
"text": "Stream In Java"
},
{
"code": null,
"e": 32500,
"s": 32481,
"text": "Interfaces in Java"
},
{
"code": null,
"e": 32518,
"s": 32500,
"text": "ArrayList in Java"
},
{
"code": null,
"e": 32550,
"s": 32518,
"text": "Initialize an ArrayList in Java"
},
{
"code": null,
"e": 32570,
"s": 32550,
"text": "Stack Class in Java"
},
{
"code": null,
"e": 32602,
"s": 32570,
"text": "Multidimensional Arrays in Java"
},
{
"code": null,
"e": 32626,
"s": 32602,
"text": "Singleton Class in Java"
},
{
"code": null,
"e": 32638,
"s": 32626,
"text": "Set in Java"
},
{
"code": null,
"e": 32661,
"s": 32638,
"text": "Multithreading in Java"
}
] |
AngularJS | ng-selected Directive - GeeksforGeeks
|
28 Mar, 2019
The ng-selected Directive in AngularJS is used to specify the selected attribute of an HTML element. It can be used to select the default value specified on an HTML element. If the expression inside the ng-selected directive returns true then the option will be displayed as selected, otherwise it will not be.
Syntax:
<element ng-selected="expression"> Contents... </element>
Example: This example uses ng-selected Directive to display the selected element.
<!DOCTYPE html>
<html>

<head>
    <title>ng-selected Directive</title>
    <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.2/angular.min.js">
    </script>
</head>

<body ng-app style="text-align:center">
    <h1 style="color:green">GeeksforGeeks</h1>
    <h2>ng-selected Directive</h2>

    <!-- The checkbox is bound to the boolean model "sort" -->
    Check to select default value:
    <input type="checkbox" ng-model="sort">
    <br><br>

    <select name="geek" >
        <option value="1" >Merge sort</option>
        <!-- ng-selected makes this the selected option whenever "sort" is true -->
        <option value="2" ng-selected="sort">Quick sort</option>
        <option value="3">Bubble sort</option>
        <option value="4">Insertion sort</option>
    </select>
</body>

</html>
Output:Before checked the checkbox:After checked the checkbox:
AngularJS-Directives
AngularJS
Web Technologies
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Angular PrimeNG Dropdown Component
Auth Guards in Angular 9/10/11
Angular PrimeNG Calendar Component
How to bundle an Angular app for production?
What is AOT and JIT Compiler in Angular ?
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
How to insert spaces/tabs in text using HTML/CSS?
|
[
{
"code": null,
"e": 26006,
"s": 25978,
"text": "\n28 Mar, 2019"
},
{
"code": null,
"e": 26314,
"s": 26006,
"text": "The ng-selected Directive in AngularJS is used to specify the selected attribute of an HTML element. It can be used to select the default value specified on an HTML element. If the expression inside the ng-selected directive returns true then the selected option value will be display otherwise not display."
},
{
"code": null,
"e": 26322,
"s": 26314,
"text": "Syntax:"
},
{
"code": null,
"e": 26381,
"s": 26322,
"text": "<element ng-selected=\"expression\"> Contents... </element> "
},
{
"code": null,
"e": 26463,
"s": 26381,
"text": "Example: This example uses ng-selected Directive to display the selected element."
},
{
"code": "<!DOCTYPE html><html> <head> <title>ng-selected Directive</title> <script src=\"https://ajax.googleapis.com/ajax/libs/angularjs/1.4.2/angular.min.js\"> </script></head> <body ng-app style=\"text-align:center\"> <h1 style=\"color:green\">GeeksforGeeks</h1> <h2>ng-selected Directive</h2> Check to select default value: <input type=\"checkbox\" ng-model=\"sort\"> <br><br> <select name=\"geek\" > <option value=\"1\" >Merge sort</option> <option value=\"2\" ng-selected=\"sort\">Quick sort</option> <option value=\"3\">Bubble sort</option> <option value=\"4\">Insertion sort</option> </select></body> </html> ",
"e": 27165,
"s": 26463,
"text": null
},
{
"code": null,
"e": 27228,
"s": 27165,
"text": "Output:Before checked the checkbox:After checked the checkbox:"
},
{
"code": null,
"e": 27249,
"s": 27228,
"text": "AngularJS-Directives"
},
{
"code": null,
"e": 27259,
"s": 27249,
"text": "AngularJS"
},
{
"code": null,
"e": 27276,
"s": 27259,
"text": "Web Technologies"
},
{
"code": null,
"e": 27374,
"s": 27276,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27409,
"s": 27374,
"text": "Angular PrimeNG Dropdown Component"
},
{
"code": null,
"e": 27440,
"s": 27409,
"text": "Auth Guards in Angular 9/10/11"
},
{
"code": null,
"e": 27475,
"s": 27440,
"text": "Angular PrimeNG Calendar Component"
},
{
"code": null,
"e": 27520,
"s": 27475,
"text": "How to bundle an Angular app for production?"
},
{
"code": null,
"e": 27562,
"s": 27520,
"text": "What is AOT and JIT Compiler in Angular ?"
},
{
"code": null,
"e": 27602,
"s": 27562,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 27635,
"s": 27602,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 27680,
"s": 27635,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 27723,
"s": 27680,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
Comments in Shell Script - GeeksforGeeks
|
10 May, 2021
Comments are the useful information that the developers provide to make the reader understand the source code. It explains the logic or a part of it used in the code. Comments are usually helpful to someone maintaining or enhancing your code when you are no longer around to answer questions about it. These are often cited as a useful programming convention that does not take part in the output of the program but improves the readability of the whole program.
There are two types of comments:
Single-line commentMulti-line comment
Single-line comment
Multi-line comment
A single-line comment starts with the hash symbol (#), with no white space before it, and lasts till the end of the line. If the comment exceeds one line then put a hash at the start of the next line and continue the comment.
The shell script is commented out prefixing # character for single-line comment.
Syntax
#This is a comment
Example:
#Single line comment
echo "hello world"
Output:
A multi-line comment is a piece of text enclosed in single-quote delimiters ('), one on each end of the comment, with the opening quote preceded by the no-op colon command (: '). Note that there must be a space between the colon and the opening quote. Multi-line comments are useful when the comment text does not fit into one line and therefore needs to span across lines. Multi-line comments or paragraphs serve as documentation for others reading your code. See the following code snippet demonstrating a multi-line comment:
Syntax:
: '
This is a
Multi-line comments'
Example:
echo "multiline comments"
: '
Print some word'
Output:
meetgor
Shell Script
Linux-Unix
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
TCP Server-Client implementation in C
tar command in Linux with examples
curl command in Linux with Examples
Conditional Statements | Shell Script
UDP Server-Client implementation in C
Tail command in Linux with examples
Cat command in Linux with examples
touch command in Linux with Examples
echo command in Linux with Examples
Compiling with g++
|
[
{
"code": null,
"e": 25211,
"s": 25183,
"text": "\n10 May, 2021"
},
{
"code": null,
"e": 25674,
"s": 25211,
"text": "Comments are the useful information that the developers provide to make the reader understand the source code. It explains the logic or a part of it used in the code. Comments are usually helpful to someone maintaining or enhancing your code when you are no longer around to answer questions about it. These are often cited as a useful programming convention that does not take part in the output of the program but improves the readability of the whole program."
},
{
"code": null,
"e": 25707,
"s": 25674,
"text": "There are two types of comments:"
},
{
"code": null,
"e": 25745,
"s": 25707,
"text": "Single-line commentMulti-line comment"
},
{
"code": null,
"e": 25765,
"s": 25745,
"text": "Single-line comment"
},
{
"code": null,
"e": 25784,
"s": 25765,
"text": "Multi-line comment"
},
{
"code": null,
"e": 25988,
"s": 25784,
"text": "A single-line comment starts with hashtag symbol with no white spaces (#) and lasts till the end of the line. If the comment exceeds one line then put a hashtag on the next line and continue the comment."
},
{
"code": null,
"e": 26069,
"s": 25988,
"text": "The shell script is commented out prefixing # character for single-line comment."
},
{
"code": null,
"e": 26095,
"s": 26069,
"text": "Syntax\n#This is a comment"
},
{
"code": null,
"e": 26104,
"s": 26095,
"text": "Example:"
},
{
"code": null,
"e": 26144,
"s": 26104,
"text": "#Single line comment\necho \"hello world\""
},
{
"code": null,
"e": 26152,
"s": 26144,
"text": "Output:"
},
{
"code": null,
"e": 26563,
"s": 26152,
"text": "Multi-line comment is a piece of text enclosed in a delimiter (β) on each end of the comment. Again there should be no white space between delimiter (β β). They are useful when the comment text does not fit into one line; therefore need to span across lines. Multi-line comments or paragraphs serve as documentation for others reading your code. See the following code snippet demonstrating multi-line comment:"
},
{
"code": null,
"e": 26606,
"s": 26563,
"text": "Syntax:\n: '\nThis is a\nMulti-line comments'"
},
{
"code": null,
"e": 26615,
"s": 26606,
"text": "Example:"
},
{
"code": null,
"e": 26662,
"s": 26615,
"text": "echo \"multiline comments\"\n: '\nPrint some word'"
},
{
"code": null,
"e": 26670,
"s": 26662,
"text": "Output:"
},
{
"code": null,
"e": 26678,
"s": 26670,
"text": "meetgor"
},
{
"code": null,
"e": 26691,
"s": 26678,
"text": "Shell Script"
},
{
"code": null,
"e": 26702,
"s": 26691,
"text": "Linux-Unix"
},
{
"code": null,
"e": 26800,
"s": 26702,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26838,
"s": 26800,
"text": "TCP Server-Client implementation in C"
},
{
"code": null,
"e": 26873,
"s": 26838,
"text": "tar command in Linux with examples"
},
{
"code": null,
"e": 26909,
"s": 26873,
"text": "curl command in Linux with Examples"
},
{
"code": null,
"e": 26947,
"s": 26909,
"text": "Conditional Statements | Shell Script"
},
{
"code": null,
"e": 26985,
"s": 26947,
"text": "UDP Server-Client implementation in C"
},
{
"code": null,
"e": 27021,
"s": 26985,
"text": "Tail command in Linux with examples"
},
{
"code": null,
"e": 27056,
"s": 27021,
"text": "Cat command in Linux with examples"
},
{
"code": null,
"e": 27093,
"s": 27056,
"text": "touch command in Linux with Examples"
},
{
"code": null,
"e": 27129,
"s": 27093,
"text": "echo command in Linux with Examples"
}
] |
How to disable buttons using AngularJS ? - GeeksforGeeks
|
30 Sep, 2020
Sometimes we need to disable the button, link on the click event. In this article, we will see how to do that with the help of AngularJS.
Approach:
The approach is to use the ng-disabled directive to disable a particular button.
In the first example, a single button is disabled by the click, and in the second, multiple buttons can be disabled by a single click.
Example 1:
HTML
<!DOCTYPE HTML>
<html>

<head>
    <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular.min.js">
    </script>

    <script>
        var myApp = angular.module("app", []);

        myApp.controller("controller", function($scope) {

            // Flag watched by ng-disabled; false keeps the button enabled
            $scope.disabledFlag = false;

            // Click handler: raising the flag makes ng-disabled
            // disable the button on the next digest cycle
            $scope.disableIt = function() {
                $scope.disabledFlag = true;
            };
        });
    </script>
</head>

<body style = "text-align:center;">
    <h1 style = "color:green;">
        GeeksForGeeks
    </h1>

    <p>
        Disable the button in AngularJS
    </p>

    <div ng-app="app">
        <div ng-controller="controller">
            <button ng-click='disableIt()'
                ng-disabled='disabledFlag' >
                Click to disable
            </button>
        </div>
    </div>
</body>

</html>
Output:
Example 2:
HTML
<!DOCTYPE HTML>
<html>

<head>
    <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular.min.js">
    </script>

    <script>
        var myApp = angular.module("app", []);

        myApp.controller("controller", function($scope) {

            // One flag per button; each ng-disabled binding
            // watches its own flag
            $scope.disabledFlag1 = false;
            $scope.disableIt1 = function() {
                $scope.disabledFlag1 = true;
            };

            $scope.disabledFlag2 = false;
            $scope.disableIt2 = function() {
                $scope.disabledFlag2 = true;
            };

            $scope.disabledFlag3 = false;
            $scope.disableIt3 = function() {
                $scope.disabledFlag3 = true;
            };

            // "disable All": raises every per-button flag at once.
            // NOTE(review): disabledFlag itself is never set to true,
            // so the "disable All" button remains enabled after the
            // click — confirm whether that is intentional.
            $scope.disabledFlag = false;
            $scope.disableIt = function() {
                $scope.disabledFlag1 = true;
                $scope.disabledFlag2 = true;
                $scope.disabledFlag3 = true;
            };
        });
    </script>
</head>

<body style = "text-align:center;">
    <h1 style = "color:green;">
        GeeksForGeeks
    </h1>

    <p>
        Disable the button in AngularJS
    </p>

    <div ng-app="app">
        <div ng-controller="controller">
            <button ng-click='disableIt1()'
                ng-disabled='disabledFlag1' >
                disable it
            </button>

            <button ng-click='disableIt2()'
                ng-disabled='disabledFlag2' >
                disable it
            </button>

            <button ng-click='disableIt3()'
                ng-disabled='disabledFlag3' >
                disable it
            </button>
            <br>
            <br>

            <button ng-click='disableIt()'
                ng-disabled='disabledFlag' >
                disable All
            </button>
        </div>
    </div>
</body>

</html>
Output:
Attention reader! Donβt stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course.
AngularJS-Misc
HTML-Misc
AngularJS
HTML
Web Technologies
HTML
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Angular PrimeNG Dropdown Component
Auth Guards in Angular 9/10/11
What is AOT and JIT Compiler in Angular ?
Angular PrimeNG Calendar Component
How to bundle an Angular app for production?
Top 10 Projects For Beginners To Practice HTML and CSS Skills
How to insert spaces/tabs in text using HTML/CSS?
How to update Node.js and NPM to next version ?
How to set the default value for an HTML <select> element ?
Hide or show elements in HTML using display property
|
[
{
"code": null,
"e": 25834,
"s": 25806,
"text": "\n30 Sep, 2020"
},
{
"code": null,
"e": 25972,
"s": 25834,
"text": "Sometimes we need to disable the button, link on the click event. In this article, we will see how to do that with the help of AngularJS."
},
{
"code": null,
"e": 25983,
"s": 25972,
"text": "Approach: "
},
{
"code": null,
"e": 26065,
"s": 25983,
"text": "The approach is to use the ng-disabled directive to disable a particular button. "
},
{
"code": null,
"e": 26197,
"s": 26065,
"text": "In the first example a single button is disabled by the click and In the second multiple buttons can be disabled by a single click."
},
{
"code": null,
"e": 26208,
"s": 26197,
"text": "Example 1:"
},
{
"code": null,
"e": 26213,
"s": 26208,
"text": "HTML"
},
{
"code": "<!DOCTYPE HTML> <html> <head> <script src=\"https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular.min.js\"> </script> <script> var myApp = angular.module(\"app\", []); myApp.controller(\"controller\", function($scope) { $scope.disabledFlag = false; $scope.disableIt = function() { $scope.disabledFlag = true; }; }); </script></head> <body style = \"text-align:center;\"> <h1 style = \"color:green;\"> GeeksForGeeks </h1> <p> Disable the button in AngularJS </p> <div ng-app=\"app\"> <div ng-controller=\"controller\"> <button ng-click='disableIt()' ng-disabled='disabledFlag' > Click to disable </button> </div> </div></body> </html>",
"e": 27048,
"s": 26213,
"text": null
},
{
"code": null,
"e": 27056,
"s": 27048,
"text": "Output:"
},
{
"code": null,
"e": 27067,
"s": 27056,
"text": "Example 2:"
},
{
"code": null,
"e": 27072,
"s": 27067,
"text": "HTML"
},
{
"code": "<!DOCTYPE HTML> <html> <head> <script src=\"https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular.min.js\"> </script> <script> var myApp = angular.module(\"app\", []); myApp.controller(\"controller\", function($scope) { $scope.disabledFlag1 = false; $scope.disableIt1 = function() { $scope.disabledFlag1 = true; }; $scope.disabledFlag2 = false; $scope.disableIt2 = function() { $scope.disabledFlag2 = true; }; $scope.disabledFlag3 = false; $scope.disableIt3 = function() { $scope.disabledFlag3 = true; }; $scope.disabledFlag = false; $scope.disableIt = function() { $scope.disabledFlag1 = true; $scope.disabledFlag2 = true; $scope.disabledFlag3 = true; }; }); </script></head> <body style = \"text-align:center;\"> <h1 style = \"color:green;\"> GeeksForGeeks </h1> <p> Disable the button in AngularJS </p> <div ng-app=\"app\"> <div ng-controller=\"controller\"> <button ng-click='disableIt1()' ng-disabled='disabledFlag1' > disable it </button> <button ng-click='disableIt2()' ng-disabled='disabledFlag2' > disable it </button> <button ng-click='disableIt3()' ng-disabled='disabledFlag3' > disable it </button> <br> <br> <button ng-click='disableIt()' ng-disabled='disabledFlag' > disable All </button> </div> </div></body> </html>",
"e": 28857,
"s": 27072,
"text": null
},
{
"code": null,
"e": 28865,
"s": 28857,
"text": "Output:"
},
{
"code": null,
"e": 29002,
"s": 28865,
"text": "Attention reader! Donβt stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course."
},
{
"code": null,
"e": 29017,
"s": 29002,
"text": "AngularJS-Misc"
},
{
"code": null,
"e": 29027,
"s": 29017,
"text": "HTML-Misc"
},
{
"code": null,
"e": 29037,
"s": 29027,
"text": "AngularJS"
},
{
"code": null,
"e": 29042,
"s": 29037,
"text": "HTML"
},
{
"code": null,
"e": 29059,
"s": 29042,
"text": "Web Technologies"
},
{
"code": null,
"e": 29064,
"s": 29059,
"text": "HTML"
},
{
"code": null,
"e": 29162,
"s": 29064,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 29197,
"s": 29162,
"text": "Angular PrimeNG Dropdown Component"
},
{
"code": null,
"e": 29228,
"s": 29197,
"text": "Auth Guards in Angular 9/10/11"
},
{
"code": null,
"e": 29270,
"s": 29228,
"text": "What is AOT and JIT Compiler in Angular ?"
},
{
"code": null,
"e": 29305,
"s": 29270,
"text": "Angular PrimeNG Calendar Component"
},
{
"code": null,
"e": 29350,
"s": 29305,
"text": "How to bundle an Angular app for production?"
},
{
"code": null,
"e": 29412,
"s": 29350,
"text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills"
},
{
"code": null,
"e": 29462,
"s": 29412,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
},
{
"code": null,
"e": 29510,
"s": 29462,
"text": "How to update Node.js and NPM to next version ?"
},
{
"code": null,
"e": 29570,
"s": 29510,
"text": "How to set the default value for an HTML <select> element ?"
}
] |
How to insert data in the map of strings? - GeeksforGeeks
|
14 Dec, 2020
Maps are associative containers that store elements in a specific (sorted-by-key) order. Each element is a combination of a key value and a mapped value.
Syntax:
map<data type of key, data type of value> M
To use the above syntax for the map in C++, it is important to include the below header file.
Header File:
#include <map>
To insert the data in the map insert() function in the map is used. It is used to insert elements with a particular key in the map container.
Syntax:
iterator map_name.insert({key, element})
Parameters: It accepts a pair that consists of a key and element which is to be inserted into the map container but it only inserts the unique key. This means that the function does not insert the key and element in the map if the key already exists in the map.
Return Value: It returns an iterator pointing to the new element in the map.
Below is the program to illustrate the same:
C++
// C++ program to store the string as// the map value#include <iostream>#include <map>using namespace std; // Driver codeint main(){ // Get the Strings string s = "abc"; string s1 = "bca"; string s2 = "cba"; // Declare map with both value // and key having string data_type map<string, string> m; // Insert the string in the map m.insert(pair<string, string>(s1, s)); m.insert(pair<string, string>(s, s2)); // Print the elements stored // in the map for (auto itr = m.begin(); itr != m.end(); ++itr) { cout << itr->first << '\t' << itr->second << '\n'; } return 0;}
abc cba
bca abc
There is another way to store the data in the map, below is the syntax for the same:
Syntax:
iterator map_name.insert(iterator position, {key, element})
Parameters: The function accepts two parameters which are described below:
{key, element}: This specifies a pair that consists of a key and element which is to be inserted into the map container.position: It only points to a position from where the searching operation for insertion is to be started to make the process faster. The insertion is done according to the order which is followed by the container.
{key, element}: This specifies a pair that consists of a key and element which is to be inserted into the map container.
position: It only points to a position from where the searching operation for insertion is to be started to make the process faster. The insertion is done according to the order which is followed by the container.
Return Value: The function returns an iterator pointing to the new element in the container.
Below is the program to illustrate the same:
C++
// C++ program to illustrate the map// insert(iteratorposition, {key, element})#include <iostream>#include <map>using namespace std; // Driver Codeint main(){ // Initialize a Map mp map<string, int> mp; // Insert elements in random order mp.insert({ "abc", 30 }); mp.insert({ "bcd", 40 }); auto it = mp.find("bcd"); // Insert {"dcd", 60} starting the // search from position where 2 // is present mp.insert(it, { "dcd", 60 }); // Print the element cout << "KEY\tELEMENT\n"; for (auto itr = mp.begin(); itr != mp.end(); ++itr) { cout << itr->first << '\t' << itr->second << '\n'; } return 0;}
KEY ELEMENT
abc 30
bcd 40
dcd 60
cpp-map
STL
C++
C++ Programs
Strings
Strings
STL
CPP
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Operator Overloading in C++
Polymorphism in C++
Friend class and function in C++
Sorting a vector in C++
std::string class in C++
Header files in C/C++ and its uses
Program to print ASCII Value of a character
How to return multiple values from a function in C or C++?
C++ Program for QuickSort
Sorting a Map by value in C++ STL
|
[
{
"code": null,
"e": 25369,
"s": 25341,
"text": "\n14 Dec, 2020"
},
{
"code": null,
"e": 25511,
"s": 25369,
"text": "Maps are associative containers that store elements in a specific order. It stores elements in a combination of key values and mapped values."
},
{
"code": null,
"e": 25519,
"s": 25511,
"text": "Syntax:"
},
{
"code": null,
"e": 25563,
"s": 25519,
"text": "map<data type of key, data type of value> M"
},
{
"code": null,
"e": 25669,
"s": 25563,
"text": "To use the above syntax for the map in C++, it is important to include the below header file:Header File:"
},
{
"code": null,
"e": 25684,
"s": 25669,
"text": "#include <map>"
},
{
"code": null,
"e": 25826,
"s": 25684,
"text": "To insert the data in the map insert() function in the map is used. It is used to insert elements with a particular key in the map container."
},
{
"code": null,
"e": 25834,
"s": 25826,
"text": "Syntax:"
},
{
"code": null,
"e": 25875,
"s": 25834,
"text": "iterator map_name.insert({key, element})"
},
{
"code": null,
"e": 26137,
"s": 25875,
"text": "Parameters: It accepts a pair that consists of a key and element which is to be inserted into the map container but it only inserts the unique key. This means that the function does not insert the key and element in the map if the key already exists in the map."
},
{
"code": null,
"e": 26214,
"s": 26137,
"text": "Return Value: It returns an iterator pointing to the new element in the map."
},
{
"code": null,
"e": 26259,
"s": 26214,
"text": "Below is the program to illustrate the same:"
},
{
"code": null,
"e": 26263,
"s": 26259,
"text": "C++"
},
{
"code": "// C++ program to store the string as// the map value#include <iostream>#include <map>using namespace std; // Driver codeint main(){ // Get the Strings string s = \"abc\"; string s1 = \"bca\"; string s2 = \"cba\"; // Declare map with both value // and key having string data_type map<string, string> m; // Insert the string in the map m.insert(pair<string, string>(s1, s)); m.insert(pair<string, string>(s, s2)); // Print the elements stored // in the map for (auto itr = m.begin(); itr != m.end(); ++itr) { cout << itr->first << '\\t' << itr->second << '\\n'; } return 0;}",
"e": 26911,
"s": 26263,
"text": null
},
{
"code": null,
"e": 26934,
"s": 26911,
"text": "abc cba\nbca abc\n"
},
{
"code": null,
"e": 27019,
"s": 26934,
"text": "There is another way to store the data in the map, below is the syntax for the same:"
},
{
"code": null,
"e": 27027,
"s": 27019,
"text": "Syntax:"
},
{
"code": null,
"e": 27087,
"s": 27027,
"text": "iterator map_name.insert(iterator position, {key, element})"
},
{
"code": null,
"e": 27164,
"s": 27087,
"text": "Parameters: The function accepts two parameters which are described below: "
},
{
"code": null,
"e": 27498,
"s": 27164,
"text": "{key, element}: This specifies a pair that consists of a key and element which is to be inserted into the map container.position: It only points to a position from where the searching operation for insertion is to be started to make the process faster. The insertion is done according to the order which is followed by the container."
},
{
"code": null,
"e": 27619,
"s": 27498,
"text": "{key, element}: This specifies a pair that consists of a key and element which is to be inserted into the map container."
},
{
"code": null,
"e": 27833,
"s": 27619,
"text": "position: It only points to a position from where the searching operation for insertion is to be started to make the process faster. The insertion is done according to the order which is followed by the container."
},
{
"code": null,
"e": 27926,
"s": 27833,
"text": "Return Value: The function returns an iterator pointing to the new element in the container."
},
{
"code": null,
"e": 27971,
"s": 27926,
"text": "Below is the program to illustrate the same:"
},
{
"code": null,
"e": 27975,
"s": 27971,
"text": "C++"
},
{
"code": "// C++ program to illustrate the map// insert(iteratorposition, {key, element})#include <iostream>#include <map>using namespace std; // Driver Codeint main(){ // Initialize a Map mp map<string, int> mp; // Insert elements in random order mp.insert({ \"abc\", 30 }); mp.insert({ \"bcd\", 40 }); auto it = mp.find(\"bcd\"); // Insert {\"dcd\", 60} starting the // search from position where 2 // is present mp.insert(it, { \"dcd\", 60 }); // Print the element cout << \"KEY\\tELEMENT\\n\"; for (auto itr = mp.begin(); itr != mp.end(); ++itr) { cout << itr->first << '\\t' << itr->second << '\\n'; } return 0;}",
"e": 28652,
"s": 27975,
"text": null
},
{
"code": null,
"e": 28698,
"s": 28652,
"text": "KEY ELEMENT\nabc 30\nbcd 40\ndcd 60\n"
},
{
"code": null,
"e": 28706,
"s": 28698,
"text": "cpp-map"
},
{
"code": null,
"e": 28710,
"s": 28706,
"text": "STL"
},
{
"code": null,
"e": 28714,
"s": 28710,
"text": "C++"
},
{
"code": null,
"e": 28727,
"s": 28714,
"text": "C++ Programs"
},
{
"code": null,
"e": 28735,
"s": 28727,
"text": "Strings"
},
{
"code": null,
"e": 28743,
"s": 28735,
"text": "Strings"
},
{
"code": null,
"e": 28747,
"s": 28743,
"text": "STL"
},
{
"code": null,
"e": 28751,
"s": 28747,
"text": "CPP"
},
{
"code": null,
"e": 28849,
"s": 28751,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28877,
"s": 28849,
"text": "Operator Overloading in C++"
},
{
"code": null,
"e": 28897,
"s": 28877,
"text": "Polymorphism in C++"
},
{
"code": null,
"e": 28930,
"s": 28897,
"text": "Friend class and function in C++"
},
{
"code": null,
"e": 28954,
"s": 28930,
"text": "Sorting a vector in C++"
},
{
"code": null,
"e": 28979,
"s": 28954,
"text": "std::string class in C++"
},
{
"code": null,
"e": 29014,
"s": 28979,
"text": "Header files in C/C++ and its uses"
},
{
"code": null,
"e": 29058,
"s": 29014,
"text": "Program to print ASCII Value of a character"
},
{
"code": null,
"e": 29117,
"s": 29058,
"text": "How to return multiple values from a function in C or C++?"
},
{
"code": null,
"e": 29143,
"s": 29117,
"text": "C++ Program for QuickSort"
}
] |
Count greater nodes in AVL tree - GeeksforGeeks
|
19 Apr, 2022
In this article, we will see how to calculate the number of elements that are greater than a given value in an AVL tree. Examples:
Input : x = 5
Root of below AVL tree
9
/ \
1 10
/ \ \
0 5 11
/ / \
-1 2 6
Output : 4
Explanation: there are 4 values which are
greater than 5 in AVL tree which are 6, 9,
10 and 11.
Prerequisites :
Insertion in AVL tree
Deletion in AVL tree
1. We maintain an extra field "desc" for storing the number of descendant nodes for every node. Like for above example node having value 5 has a desc field value equal to 2. 2. For calculating the number of nodes which are greater than given value we simply traverse the tree. While traversing three cases can occur- I Case- x(given value) is greater than the value of current node. So, we go to the right child of the current node. II Case- x is lesser than the value of current node. We increase the current count by number of successors of the right child of the current node and then again add two to the current count(one for the current node and one for the right child.). In this step first, we check whether the right child exists or not. Then we move to left child of current node. III Case-x is equal to the value of current node. In this case we add the value of desc field of right child of current node to current count and then add one to it (for counting right child). Also in this case we check whether the right child exists or not. Calculating values of desc field
Insertion – When we insert a node we increment one to child field of every predecessor of the new node. In the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes. Deletion – When we delete a node then we decrement one from every predecessor node of deleted node. Again, in the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes.
Insertion – When we insert a node we increment one to child field of every predecessor of the new node. In the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes.
Deletion – When we delete a node then we decrement one from every predecessor node of deleted node. Again, in the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes.
C
// C program to find number of elements// greater than a given value in AVL#include <stdio.h>#include <stdlib.h>struct Node { int key; struct Node* left, *right; int height; int desc;}; int height(struct Node* N){ if (N == NULL) return 0; return N->height;} // A utility function to get maximum// of two integersint max(int a, int b){ return (a > b) ? a : b;} struct Node* newNode(int key){ struct Node* node = (struct Node*) malloc(sizeof(struct Node)); node->key = key; node->left = NULL; node->right = NULL; node->height = 1; // initially added at leaf node->desc = 0; return (node);} // A utility function to right rotate subtree// rooted with ystruct Node* rightRotate(struct Node* y){ struct Node* x = y->left; struct Node* T2 = x->right; // Perform rotation x->right = y; y->left = T2; // Update heights y->height = max(height(y->left), height(y->right)) + 1; x->height = max(height(x->left), height(x->right)) + 1; // calculate the number of children of x and y // which are changed due to rotation. int val = (T2 != NULL) ? T2->desc : -1; y->desc = y->desc - (x->desc + 1) + (val + 1); x->desc = x->desc - (val + 1) + (y->desc + 1); return x;} // A utility function to left rotate subtree rooted// with xstruct Node* leftRotate(struct Node* x){ struct Node* y = x->right; struct Node* T2 = y->left; // Perform rotation y->left = x; x->right = T2; // Update heights x->height = max(height(x->left), height(x->right)) + 1; y->height = max(height(y->left), height(y->right)) + 1; // calculate the number of children of x and y // which are changed due to rotation. int val = (T2 != NULL) ? T2->desc : -1; x->desc = x->desc - (y->desc + 1) + (val + 1); y->desc = y->desc - (val + 1) + (x->desc + 1); return y;} // Get Balance factor of node Nint getBalance(struct Node* N){ if (N == NULL) return 0; return height(N->left) - height(N->right);} struct Node* insert(struct Node* node, int key){ /* 1. 
Perform the normal BST rotation */ if (node == NULL) return (newNode(key)); if (key < node->key) { node->left = insert(node->left, key); node->desc++; } else if (key > node->key) { node->right = insert(node->right, key); node->desc++; } else // Equal keys not allowed return node; /* 2. Update height of this ancestor node */ node->height = 1 + max(height(node->left), height(node->right)); /* 3. Get the balance factor of this ancestor node to check whether this node became unbalanced */ int balance = getBalance(node); // If node becomes unbalanced, 4 cases arise // Left Left Case if (balance > 1 && key < node->left->key) return rightRotate(node); // Right Right Case if (balance < -1 && key > node->right->key) return leftRotate(node); // Left Right Case if (balance > 1 && key > node->left->key) { node->left = leftRotate(node->left); return rightRotate(node); } // Right Left Case if (balance < -1 && key < node->right->key) { node->right = rightRotate(node->right); return leftRotate(node); } /* return the (unchanged) node pointer */ return node;} /* Given a non-empty binary search tree, return the node with minimum key value found in that tree. Note that the entire tree does not need to be searched. */struct Node* minValueNode(struct Node* node){ struct Node* current = node; /* loop down to find the leftmost leaf */ while (current->left != NULL) current = current->left; return current;} // Recursive function to delete a node with given key// from subtree with given root. 
It returns root of// the modified subtree.struct Node* deleteNode(struct Node* root, int key){ // STEP 1: PERFORM STANDARD BST DELETE if (root == NULL) return root; // If the key to be deleted is smaller than the // root's key, then it lies in left subtree if (key < root->key) { root->left = deleteNode(root->left, key); root->desc = root->desc - 1; } // If the key to be deleted is greater than the // root's key, then it lies in right subtree else if (key > root->key) { root->right = deleteNode(root->right, key); root->desc = root->desc - 1; } // if key is same as root's key, then This is // the node to be deleted else { // node with only one child or no child if ((root->left == NULL) || (root->right == NULL)) { struct Node* temp = root->left ? root->left : root->right; // No child case if (temp == NULL) { temp = root; root = NULL; free(temp); } else // One child case { *root = *temp; // Copy the contents of // the non-empty child free(temp); } } else { // node with two children: Get the inorder // successor (smallest in the right subtree) struct Node* temp = minValueNode(root->right); // Copy the inorder successor's data to this node root->key = temp->key; // Delete the inorder successor root->right = deleteNode(root->right, temp->key); root->desc = root->desc - 1; } } // If the tree had only one node then return if (root == NULL) return root; // STEP 2: UPDATE HEIGHT OF THE CURRENT NODE root->height = 1 + max(height(root->left), height(root->right)); // STEP 3: GET THE BALANCE FACTOR OF THIS NODE (to // check whether this node became unbalanced) int balance = getBalance(root); // If this node becomes unbalanced, 4 cases arise // Left Left Case if (balance > 1 && getBalance(root->left) >= 0) return rightRotate(root); // Left Right Case if (balance > 1 && getBalance(root->left) < 0) { root->left = leftRotate(root->left); return rightRotate(root); } // Right Right Case if (balance < -1 && getBalance(root->right) <= 0) return leftRotate(root); // Right Left Case if 
(balance < -1 && getBalance(root->right) > 0) { root->right = rightRotate(root->right); return leftRotate(root); } return root;} // A utility function to print preorder traversal of// the tree.void preOrder(struct Node* root){ if (root != NULL) { printf("%d ", root->key); preOrder(root->left); preOrder(root->right); }} // Returns count ofint CountGreater(struct Node* root, int x){ int res = 0; // Search for x. While searching, keep // updating res if x is greater than // current node. while (root != NULL) { int desc = (root->right != NULL) ? root->right->desc : -1; if (root->key > x) { res = res + desc + 1 + 1; root = root->left; } else if (root->key < x) root = root->right; else { res = res + desc + 1; break; } } return res;} /* Driver program to test above function*/int main(){ struct Node* root = NULL; root = insert(root, 9); root = insert(root, 5); root = insert(root, 10); root = insert(root, 0); root = insert(root, 6); root = insert(root, 11); root = insert(root, -1); root = insert(root, 1); root = insert(root, 2); /* The constructed AVL Tree would be 9 / \ 1 10 / \ \ 0 5 11 / / \ -1 2 6 */ printf("Preorder traversal of the constructed AVL " "tree is \n"); preOrder(root); printf("\nNumber of elements greater than 9 are %d", CountGreater(root, 9)); root = deleteNode(root, 10); /* The AVL Tree after deletion of 10 1 / \ 0 9 / / \ -1 5 11 / \ 2 6 */ printf("\nPreorder traversal after deletion of 10 \n"); preOrder(root); printf("\nNumber of elements greater than 9 are %d", CountGreater(root, 9)); return 0;}
Output:
Preorder traversal of the constructed AVL tree is
9 1 0 -1 5 2 6 10 11
Number of elements greater than 9 are 2
Preorder traversal after deletion of 10
1 0 -1 9 5 2 6 11
Number of elements greater than 9 are 1
Time Complexity: Time complexity of CountGreater function is O(log(n)) where n is number of nodes in avl tree, as we are basically searching for the given number in avl which takes O(log(n)) time. This article is contributed by Ashish Sharma. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
rkbhola5
AVL-Tree
Advanced Data Structure
Binary Search Tree
Binary Search Tree
AVL-Tree
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Ordered Set and GNU C++ PBDS
2-3 Trees | (Search, Insert and Deletion)
Extendible Hashing (Dynamic approach to DBMS)
Suffix Array | Set 1 (Introduction)
Interval Tree
Binary Search Tree | Set 1 (Search and Insertion)
Binary Search Tree | Set 2 (Delete)
A program to check if a binary tree is BST or not
Construct BST from given preorder traversal | Set 1
Sorted Array to Balanced BST
|
[
{
"code": null,
"e": 25731,
"s": 25703,
"text": "\n19 Apr, 2022"
},
{
"code": null,
"e": 25858,
"s": 25731,
"text": "In this article we will see that how to calculate number of elements which are greater than given value in AVL tree. Examples:"
},
{
"code": null,
"e": 26131,
"s": 25858,
"text": "Input : x = 5\n Root of below AVL tree\n 9\n / \\\n 1 10\n / \\ \\\n 0 5 11\n / / \\\n -1 2 6\nOutput : 4\n\nExplanation: there are 4 values which are \ngreater than 5 in AVL tree which are 6, 9, \n10 and 11."
},
{
"code": null,
"e": 26147,
"s": 26131,
"text": "Prerequisites :"
},
{
"code": null,
"e": 26169,
"s": 26147,
"text": "Insertion in AVL tree"
},
{
"code": null,
"e": 26190,
"s": 26169,
"text": "Deletion in AVL tree"
},
{
"code": null,
"e": 27261,
"s": 26190,
"text": "1. We maintain an extra field βdescβ for storing the number of descendant nodes for every node. Like for above example node having value 5 has a desc field value equal to 2. 2. for calculating the number of nodes which are greater than given value we simply traverse the tree. While traversing three cases can occur- I Case- x(given value) is greater than the value of current node. So, we go to the right child of the current node. II Case- x is lesser than the value of current node. we increase the current count by number of successors of the right child of the current node and then again add two to the current count(one for the current node and one for the right child.). In this step first, we make sure that right child exists or not. Then we move to left child of current node. III Case-x is equal to the value of current node. In this case we add the value of desc field of right child of current node to current count and then add one to it (for counting right child). Also in this case we see that right child exists or not. Calculating values of desc field"
},
{
"code": null,
"e": 27693,
"s": 27261,
"text": "Insertion β When we insert a node we increment one to child field of every predecessor of the new node. In the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes.Deletion β When we delete a node then we decrement one from every predecessor node of deleted node. Again, In the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes."
},
{
"code": null,
"e": 27908,
"s": 27693,
"text": "Insertion β When we insert a node we increment one to child field of every predecessor of the new node. In the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes."
},
{
"code": null,
"e": 28126,
"s": 27908,
"text": "Deletion β When we delete a node then we decrement one from every predecessor node of deleted node. Again, In the leftRotate and rightRotate functions we make appropriate changes in the value of child fields of nodes."
},
{
"code": null,
"e": 28128,
"s": 28126,
"text": "C"
},
{
"code": "// C program to find number of elements// greater than a given value in AVL#include <stdio.h>#include <stdlib.h>struct Node { int key; struct Node* left, *right; int height; int desc;}; int height(struct Node* N){ if (N == NULL) return 0; return N->height;} // A utility function to get maximum// of two integersint max(int a, int b){ return (a > b) ? a : b;} struct Node* newNode(int key){ struct Node* node = (struct Node*) malloc(sizeof(struct Node)); node->key = key; node->left = NULL; node->right = NULL; node->height = 1; // initially added at leaf node->desc = 0; return (node);} // A utility function to right rotate subtree// rooted with ystruct Node* rightRotate(struct Node* y){ struct Node* x = y->left; struct Node* T2 = x->right; // Perform rotation x->right = y; y->left = T2; // Update heights y->height = max(height(y->left), height(y->right)) + 1; x->height = max(height(x->left), height(x->right)) + 1; // calculate the number of children of x and y // which are changed due to rotation. int val = (T2 != NULL) ? T2->desc : -1; y->desc = y->desc - (x->desc + 1) + (val + 1); x->desc = x->desc - (val + 1) + (y->desc + 1); return x;} // A utility function to left rotate subtree rooted// with xstruct Node* leftRotate(struct Node* x){ struct Node* y = x->right; struct Node* T2 = y->left; // Perform rotation y->left = x; x->right = T2; // Update heights x->height = max(height(x->left), height(x->right)) + 1; y->height = max(height(y->left), height(y->right)) + 1; // calculate the number of children of x and y // which are changed due to rotation. int val = (T2 != NULL) ? T2->desc : -1; x->desc = x->desc - (y->desc + 1) + (val + 1); y->desc = y->desc - (val + 1) + (x->desc + 1); return y;} // Get Balance factor of node Nint getBalance(struct Node* N){ if (N == NULL) return 0; return height(N->left) - height(N->right);} struct Node* insert(struct Node* node, int key){ /* 1. 
Perform the normal BST rotation */ if (node == NULL) return (newNode(key)); if (key < node->key) { node->left = insert(node->left, key); node->desc++; } else if (key > node->key) { node->right = insert(node->right, key); node->desc++; } else // Equal keys not allowed return node; /* 2. Update height of this ancestor node */ node->height = 1 + max(height(node->left), height(node->right)); /* 3. Get the balance factor of this ancestor node to check whether this node became unbalanced */ int balance = getBalance(node); // If node becomes unbalanced, 4 cases arise // Left Left Case if (balance > 1 && key < node->left->key) return rightRotate(node); // Right Right Case if (balance < -1 && key > node->right->key) return leftRotate(node); // Left Right Case if (balance > 1 && key > node->left->key) { node->left = leftRotate(node->left); return rightRotate(node); } // Right Left Case if (balance < -1 && key < node->right->key) { node->right = rightRotate(node->right); return leftRotate(node); } /* return the (unchanged) node pointer */ return node;} /* Given a non-empty binary search tree, return the node with minimum key value found in that tree. Note that the entire tree does not need to be searched. */struct Node* minValueNode(struct Node* node){ struct Node* current = node; /* loop down to find the leftmost leaf */ while (current->left != NULL) current = current->left; return current;} // Recursive function to delete a node with given key// from subtree with given root. 
It returns root of// the modified subtree.struct Node* deleteNode(struct Node* root, int key){ // STEP 1: PERFORM STANDARD BST DELETE if (root == NULL) return root; // If the key to be deleted is smaller than the // root's key, then it lies in left subtree if (key < root->key) { root->left = deleteNode(root->left, key); root->desc = root->desc - 1; } // If the key to be deleted is greater than the // root's key, then it lies in right subtree else if (key > root->key) { root->right = deleteNode(root->right, key); root->desc = root->desc - 1; } // if key is same as root's key, then This is // the node to be deleted else { // node with only one child or no child if ((root->left == NULL) || (root->right == NULL)) { struct Node* temp = root->left ? root->left : root->right; // No child case if (temp == NULL) { temp = root; root = NULL; free(temp); } else // One child case { *root = *temp; // Copy the contents of // the non-empty child free(temp); } } else { // node with two children: Get the inorder // successor (smallest in the right subtree) struct Node* temp = minValueNode(root->right); // Copy the inorder successor's data to this node root->key = temp->key; // Delete the inorder successor root->right = deleteNode(root->right, temp->key); root->desc = root->desc - 1; } } // If the tree had only one node then return if (root == NULL) return root; // STEP 2: UPDATE HEIGHT OF THE CURRENT NODE root->height = 1 + max(height(root->left), height(root->right)); // STEP 3: GET THE BALANCE FACTOR OF THIS NODE (to // check whether this node became unbalanced) int balance = getBalance(root); // If this node becomes unbalanced, 4 cases arise // Left Left Case if (balance > 1 && getBalance(root->left) >= 0) return rightRotate(root); // Left Right Case if (balance > 1 && getBalance(root->left) < 0) { root->left = leftRotate(root->left); return rightRotate(root); } // Right Right Case if (balance < -1 && getBalance(root->right) <= 0) return leftRotate(root); // Right Left Case if 
(balance < -1 && getBalance(root->right) > 0) { root->right = rightRotate(root->right); return leftRotate(root); } return root;} // A utility function to print preorder traversal of// the tree.void preOrder(struct Node* root){ if (root != NULL) { printf(\"%d \", root->key); preOrder(root->left); preOrder(root->right); }} // Returns count ofint CountGreater(struct Node* root, int x){ int res = 0; // Search for x. While searching, keep // updating res if x is greater than // current node. while (root != NULL) { int desc = (root->right != NULL) ? root->right->desc : -1; if (root->key > x) { res = res + desc + 1 + 1; root = root->left; } else if (root->key < x) root = root->right; else { res = res + desc + 1; break; } } return res;} /* Driver program to test above function*/int main(){ struct Node* root = NULL; root = insert(root, 9); root = insert(root, 5); root = insert(root, 10); root = insert(root, 0); root = insert(root, 6); root = insert(root, 11); root = insert(root, -1); root = insert(root, 1); root = insert(root, 2); /* The constructed AVL Tree would be 9 / \\ 1 10 / \\ \\ 0 5 11 / / \\ -1 2 6 */ printf(\"Preorder traversal of the constructed AVL \" \"tree is \\n\"); preOrder(root); printf(\"\\nNumber of elements greater than 9 are %d\", CountGreater(root, 9)); root = deleteNode(root, 10); /* The AVL Tree after deletion of 10 1 / \\ 0 9 / / \\ -1 5 11 / \\ 2 6 */ printf(\"\\nPreorder traversal after deletion of 10 \\n\"); preOrder(root); printf(\"\\nNumber of elements greater than 9 are %d\", CountGreater(root, 9)); return 0;}",
"e": 36383,
"s": 28128,
"text": null
},
{
"code": null,
"e": 36391,
"s": 36383,
"text": "Output:"
},
{
"code": null,
"e": 36600,
"s": 36391,
"text": "Preorder traversal of the constructed AVL tree is\n9 1 0 -1 5 2 6 10 11\nNumber of elements greater than 9 are 2\nPreorder traversal after deletion of 10\n1 0 -1 9 5 2 6 11\nNumber of elements greater than 9 are 1"
},
{
"code": null,
"e": 37219,
"s": 36600,
"text": "Time Complexity: Time complexity of CountGreater function is O(log(n)) where n is number of nodes in avl tree, as we are basically searching for the given number in avl which takes O(log(n)) time. This article is contributed by Ashish Sharma. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above."
},
{
"code": null,
"e": 37228,
"s": 37219,
"text": "rkbhola5"
},
{
"code": null,
"e": 37237,
"s": 37228,
"text": "AVL-Tree"
},
{
"code": null,
"e": 37261,
"s": 37237,
"text": "Advanced Data Structure"
},
{
"code": null,
"e": 37280,
"s": 37261,
"text": "Binary Search Tree"
},
{
"code": null,
"e": 37299,
"s": 37280,
"text": "Binary Search Tree"
},
{
"code": null,
"e": 37308,
"s": 37299,
"text": "AVL-Tree"
},
{
"code": null,
"e": 37406,
"s": 37308,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 37435,
"s": 37406,
"text": "Ordered Set and GNU C++ PBDS"
},
{
"code": null,
"e": 37477,
"s": 37435,
"text": "2-3 Trees | (Search, Insert and Deletion)"
},
{
"code": null,
"e": 37523,
"s": 37477,
"text": "Extendible Hashing (Dynamic approach to DBMS)"
},
{
"code": null,
"e": 37559,
"s": 37523,
"text": "Suffix Array | Set 1 (Introduction)"
},
{
"code": null,
"e": 37573,
"s": 37559,
"text": "Interval Tree"
},
{
"code": null,
"e": 37623,
"s": 37573,
"text": "Binary Search Tree | Set 1 (Search and Insertion)"
},
{
"code": null,
"e": 37659,
"s": 37623,
"text": "Binary Search Tree | Set 2 (Delete)"
},
{
"code": null,
"e": 37709,
"s": 37659,
"text": "A program to check if a binary tree is BST or not"
},
{
"code": null,
"e": 37761,
"s": 37709,
"text": "Construct BST from given preorder traversal | Set 1"
}
] |
PHP | bcadd() Function - GeeksforGeeks
|
19 Apr, 2018
The bcadd() function in PHP is an inbuilt function and is used to add two arbitrary precision numbers. This function accepts two arbitrary precision numbers as strings and returns the addition of the two numbers after scaling the result to a specified precision.
Syntax:
string bcadd ( $num_str1, $num_str2, $scaleVal)
Parameters: This function accepts three parameters as shown in the above syntax and explained below:
$num_str1: This parameter is of string type and represents the left operand or one of the two numbers among which we want to perform the addition. This parameter is mandatory.
$num_str2: This parameter is of string type and represents the right operand or one of the two numbers among which we want to perform the addition. This parameter is mandatory.
$scaleVal: This parameter is of int type and is optional. This parameter tells the number of digits that will appear after the decimal in the result of addition. Its default value is zero.
Return Value: This function returns the addition of the two numbers $num_str1 and $num_str2 as string.
Examples:
Input: $num_str1 = 3, $num_str2 = 11.222
Output: 14
Since the parameter $scaleVal is not specified so
no digits after decimal is appeared in the
result after addition.
Input: $num_str1 = 3, $num_str2 = 11.222, $scaleVal = 4
Output: 14.2220
Below programs illustrate the bcadd() function in PHP :
Program 1:
<?php// PHP program to illustrate bcadd() function // input numbers with arbitrary precision$num_str1 = "3";$num_str2 = "11.222"; // calculates the addition of// the two numbers when $scaleVal is// not specified$res = bcadd($num_str1, $num_str2); echo $res; ?>
Output:
14
Program 2:
<?php// PHP program to illustrate bcadd() function // input numbers with arbitrary precision$num_str1 = "3";$num_str2 = "11.222"; // scale value$scaleVal = 4; // calculates the addition of the two// numbers when $scaleVal is specified$res = bcadd($num_str1, $num_str2, $scaleVal); echo $res; ?>
Output:
14.2220
Reference:http://php.net/manual/en/function.bcadd.php
PHP-bc
PHP
Web Technologies
PHP
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to fetch data from localserver database and display on HTML table using PHP ?
How to create admin login page using PHP?
PHP str_replace() Function
Different ways for passing data to view in Laravel
How to pass form variables from one page to other page in PHP ?
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
How to insert spaces/tabs in text using HTML/CSS?
|
[
{
"code": null,
"e": 26217,
"s": 26189,
"text": "\n19 Apr, 2018"
},
{
"code": null,
"e": 26480,
"s": 26217,
"text": "The bcadd() function in PHP is an inbuilt function and is used to add two arbitrary precision numbers. This function accepts two arbitrary precision numbers as strings and returns the addition of the two numbers after scaling the result to a specified precision."
},
{
"code": null,
"e": 26488,
"s": 26480,
"text": "Syntax:"
},
{
"code": null,
"e": 26536,
"s": 26488,
"text": "string bcadd ( $num_str1, $num_str2, $scaleVal)"
},
{
"code": null,
"e": 26637,
"s": 26536,
"text": "Parameters: This function accepts three parameters as shown in the above syntax and explained below:"
},
{
"code": null,
"e": 26813,
"s": 26637,
"text": "$num_str1: This parameter is of string type and represents the left operand or one of the two numbers among which we want to perform the addition. This parameter is mandatory."
},
{
"code": null,
"e": 26990,
"s": 26813,
"text": "$num_str2: This parameter is of string type and represents the right operand or one of the two numbers among which we want to perform the addition. This parameter is mandatory."
},
{
"code": null,
"e": 27180,
"s": 26990,
"text": "$scaleVal: This parameter is of int type and is optional. This parameter tells the number of digits that will appear after the decimal in the result of addition. Itβs default value is zero."
},
{
"code": null,
"e": 27283,
"s": 27180,
"text": "Return Value: This function returns the addition of the two numbers $num_str1 and $num_str2 as string."
},
{
"code": null,
"e": 27293,
"s": 27283,
"text": "Examples:"
},
{
"code": null,
"e": 27538,
"s": 27293,
"text": "Input: $num_str1 = 3, $num_str2 = 11.222\nOutput: 14\nSince the parameter $scaleVal is not specified so\nno digits after decimal is appeared in the \nresult after addition.\n\nInput: $num_str1 = 3, $num_str2 = 11.222, $scaleVal = 4\nOutput: 14.2220\n"
},
{
"code": null,
"e": 27594,
"s": 27538,
"text": "Below programs illustrate the bcadd() function in PHP :"
},
{
"code": null,
"e": 27605,
"s": 27594,
"text": "Program 1:"
},
{
"code": "<?php// PHP program to illustrate bcadd() function // input numbers with arbitrary precision$num_str1 = \"3\";$num_str2 = \"11.222\"; // calculates the addition of// the two numbers when $scaleVal is// not specified$res = bcadd($num_str1, $num_str2); echo $res; ?>",
"e": 27873,
"s": 27605,
"text": null
},
{
"code": null,
"e": 27881,
"s": 27873,
"text": "Output:"
},
{
"code": null,
"e": 27885,
"s": 27881,
"text": "14\n"
},
{
"code": null,
"e": 27896,
"s": 27885,
"text": "Program 2:"
},
{
"code": "<?php// PHP program to illustrate bcadd() function // input numbers with arbitrary precision$num_str1 = \"3\";$num_str2 = \"11.222\"; // scale value$scaleVal = 4; // calculates the addition of the two// numbers when $scaleVal is specified$res = bcadd($num_str1, $num_str2, $scaleVal); echo $res; ?>",
"e": 28199,
"s": 27896,
"text": null
},
{
"code": null,
"e": 28207,
"s": 28199,
"text": "Output:"
},
{
"code": null,
"e": 28216,
"s": 28207,
"text": "14.2220\n"
},
{
"code": null,
"e": 28270,
"s": 28216,
"text": "Reference:http://php.net/manual/en/function.bcadd.php"
},
{
"code": null,
"e": 28277,
"s": 28270,
"text": "PHP-bc"
},
{
"code": null,
"e": 28281,
"s": 28277,
"text": "PHP"
},
{
"code": null,
"e": 28298,
"s": 28281,
"text": "Web Technologies"
},
{
"code": null,
"e": 28302,
"s": 28298,
"text": "PHP"
},
{
"code": null,
"e": 28400,
"s": 28302,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28482,
"s": 28400,
"text": "How to fetch data from localserver database and display on HTML table using PHP ?"
},
{
"code": null,
"e": 28524,
"s": 28482,
"text": "How to create admin login page using PHP?"
},
{
"code": null,
"e": 28551,
"s": 28524,
"text": "PHP str_replace() Function"
},
{
"code": null,
"e": 28602,
"s": 28551,
"text": "Different ways for passing data to view in Laravel"
},
{
"code": null,
"e": 28666,
"s": 28602,
"text": "How to pass form variables from one page to other page in PHP ?"
},
{
"code": null,
"e": 28706,
"s": 28666,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 28739,
"s": 28706,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 28784,
"s": 28739,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 28827,
"s": 28784,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
CSS Grid Layout: The Fr Unit - GeeksforGeeks
|
02 Jul, 2021
The CSS Grid Layout module is used to create a grid-based layout system; with the help of rows and columns, it makes it easier to design any webpage without using floats and positioning.
Syntax:
.class {
display:grid;
}
Note: An HTML element becomes a grid if that element sets display:grid
grid-template-columns : This specifies the size of the columns
grid-template-rows : Specifies the size of the rows.
grid-gap : sets the gaps between rows and columns.
Some grid-template-columns keyword value:
grid-template-columns: repeat( [ <positive-integer> | auto-fill | auto-fit ], <track-list> );
grid-template-rows: repeat( [ <positive-integer> | auto-fill | auto-fit ], <track-list> );
Represents a repeated fragment of the tracklist, allowing a large number of columns that exhibit a recurring pattern to be written in a more compact form. It allows you to define a pattern repeated X times.
grid-template-columns: auto;
grid-template-rows: auto;
Indicates auto-placement, an automatic span, or a default span of one Column is fitted to the content in the column. The row is fitted to the content in the row.
grid-template-columns: minmax(min, max);
grid-template-rows: minmax(min, max);
Is a functional notation that defines a size range greater than or equal to min and less than or equal to max
The Fr Unit : Fr is a fractional unit.
The Fr unit is an input that automatically calculates layout divisions when adjusting for gaps inside the grid.
Example 1.This example illustrates the use of fr unit.
html
<!DOCTYPE html><html> <head> <style> .container { display: grid; grid-template-columns: 1fr 1fr 1fr 1fr; grid-template-rows: 100px; grid-gap: 10px; } .container div { border: 3px black; border-radius: 7px; background-color: yellowgreen; padding: 1em; text-align: center; color: darkgreen; } h1 { color: green; text-align: center; </style></head> <body> <h1>GeeksforGeeks</h1> <div class="container"> <div>geeksforgeeks 1</div> <div>geeksforgeeks 2</div> <div>geeksforgeeks 3</div> <div>geeksforgeeks 4</div> </div></body> </html>
output 1
We have 4 columns, each taking up the same amount of space. Each has a width of 1fr. Each column is equal: 1fr = 25% of the available space.
Example 2. This example illustrates the use of fr unit with different fractional values.
html
<!DOCTYPE html><html> <head> <style> .container { display: grid; grid-template-columns: 1fr 1fr 2fr 2fr; grid-template-rows: 100px 150px 200px 200px; grid-gap: 10px; } .container div { border: 3px black; border-radius: 7px; background-color: yellowgreen; padding: 1em; text-align: center; color: darkgreen; } h1 { color: green; text-align: center; </style></head> <body> <h1>GeeksforGeeks</h1> <div class="container"> <div>geeksforgeeks 1</div> <div>geeksforgeeks 2</div> <div>geeksforgeeks 3</div> <div>geeksforgeeks 4</div> <div>geeksforgeeks 5</div> <div>geeksforgeeks 6</div> <div>geeksforgeeks 7</div> <div>geeksforgeeks 8</div> <div>geeksforgeeks 9</div> <div>geeksforgeeks 10</div> <div>geeksforgeeks 11</div> <div>geeksforgeeks 12</div> <div>geeksforgeeks 13</div> <div>geeksforgeeks 14</div> <div>geeksforgeeks 15</div> <div>geeksforgeeks 16</div> </div></body> </html>
output 2
We have 4 columns, the first two columns take up the same amount of space i.e. 1fr and the last two columns take up the same amount of space i.e. 2fr.
Example 3. This example illustrates the use of the fr unit with repeat() and auto notation.
html
<!DOCTYPE html><html> <head> <style> .container { display: grid; grid-template-columns: repeat(2, 1fr) repeat(2, 2fr); grid-template-rows: auto; grid-gap: 10px; } .container div { border: 3px black; border-radius: 7px; background-color: yellowgreen; padding: 1em; text-align: center; color: darkgreen; } /* Designing h1 element */ h1 { color: green; text-align: center; </style></head> <body> <h1>GeeksforGeeks</h1> <div class="container"> <div>geeksforgeeks 1</div> <div>geeksforgeeks 2</div> <div>geeksforgeeks 3</div> <div>geeksforgeeks 4</div> <div>geeksforgeeks 5</div> <div>geeksforgeeks 6</div> <div>geeksforgeeks 7</div> <div>geeksforgeeks 8</div> <div>geeksforgeeks 9</div> <div>geeksforgeeks 10</div> <div>geeksforgeeks 11</div> <div>geeksforgeeks 12</div> <div>geeksforgeeks 13</div> <div>geeksforgeeks 14</div> <div>geeksforgeeks 15</div> <div>geeksforgeeks 16</div> </div></body> </html>
repeat(number of columns/rows, the column width we want);
output 3
Supported Browsers:
Google Chrome
Internet Explorer
Firefox
Opera
Safari
ysachin2314
CSS-Basics
CSS
Web Technologies
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to apply style to parent if it has child with CSS?
Types of CSS (Cascading Style Sheet)
How to position a div at the bottom of its container using CSS?
Design a web page using HTML and CSS
How to Upload Image into Database and Display it using PHP ?
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
Difference between var, let and const keywords in JavaScript
|
[
{
"code": null,
"e": 26589,
"s": 26561,
"text": "\n02 Jul, 2021"
},
{
"code": null,
"e": 26772,
"s": 26589,
"text": "The CSS Grid Layout module is used to create a grid-based layout system, with the help of rows and columns it makes easier to design any webpage without using floats and positioning."
},
{
"code": null,
"e": 26782,
"s": 26772,
"text": "Syntax: "
},
{
"code": null,
"e": 26811,
"s": 26782,
"text": ".class {\n display:grid;\n}"
},
{
"code": null,
"e": 26884,
"s": 26813,
"text": "Note: An HTML element becomes a grid if that element sets display:grid"
},
{
"code": null,
"e": 26949,
"s": 26886,
"text": "grid-template-columns : This specifies the size of the columns"
},
{
"code": null,
"e": 27002,
"s": 26949,
"text": "grid-template-rows : Specifies the size of the rows."
},
{
"code": null,
"e": 27053,
"s": 27002,
"text": "grid-gap : sets the gaps between rows and columns."
},
{
"code": null,
"e": 27097,
"s": 27055,
"text": "Some grid-template-columns keyword value:"
},
{
"code": null,
"e": 27193,
"s": 27099,
"text": "grid-template-columns: repeat( [ <positive-integer> | auto-fill | auto-fit ], <track-list> );"
},
{
"code": null,
"e": 27284,
"s": 27193,
"text": "grid-template-rows: repeat( [ <positive-integer> | auto-fill | auto-fit ], <track-list> );"
},
{
"code": null,
"e": 27493,
"s": 27284,
"text": "Represents a repeated fragment of the tracklist, allowing a large number of columns that exhibit a recurring pattern to be written in a more compact form. It allows you to define a pattern repeated X times. "
},
{
"code": null,
"e": 27523,
"s": 27493,
"text": "grid-template-columns: auto; "
},
{
"code": null,
"e": 27549,
"s": 27523,
"text": "grid-template-rows: auto;"
},
{
"code": null,
"e": 27712,
"s": 27549,
"text": "Indicates auto-placement, an automatic span, or a default span of one Column is fitted to the content in the column. The row is fitted to the content in the row. "
},
{
"code": null,
"e": 27753,
"s": 27712,
"text": "grid-template-columns: minmax(min, max);"
},
{
"code": null,
"e": 27791,
"s": 27753,
"text": "grid-template-rows: minmax(min, max);"
},
{
"code": null,
"e": 27902,
"s": 27791,
"text": "Is a functional notation that defines a size range greater than or equal to min and less than or equal to max "
},
{
"code": null,
"e": 27941,
"s": 27902,
"text": "The Fr Unit : Fr is a fractional unit."
},
{
"code": null,
"e": 28053,
"s": 27941,
"text": "The Fr unit is an input that automatically calculates layout divisions when adjusting for gaps inside the grid."
},
{
"code": null,
"e": 28110,
"s": 28055,
"text": "Example 1.This example illustrates the use of fr unit."
},
{
"code": null,
"e": 28117,
"s": 28112,
"text": "html"
},
{
"code": "<!DOCTYPE html><html> <head> <style> .container { display: grid; grid-template-columns: 1fr 1fr 1fr 1fr; grid-template-rows: 100px; grid-gap: 10px; } .container div { border: 3px black; border-radius: 7px; background-color: yellowgreen; padding: 1em; text-align: center; color: darkgreen; } h1 { color: green; text-align: center; </style></head> <body> <h1>GeeksforGeeks</h1> <div class=\"container\"> <div>geeksforgeeks 1</div> <div>geeksforgeeks 2</div> <div>geeksforgeeks 3</div> <div>geeksforgeeks 4</div> </div></body> </html>",
"e": 28862,
"s": 28117,
"text": null
},
{
"code": null,
"e": 28871,
"s": 28862,
"text": "output 1"
},
{
"code": null,
"e": 29007,
"s": 28871,
"text": "We have 4 columns each take up the same amount of space. Each has a width of 1fr. Each column is equal. 1fr=25% of the available space."
},
{
"code": null,
"e": 29096,
"s": 29007,
"text": "Example 2. This example illustrates the use of fr unit with different fractional values."
},
{
"code": null,
"e": 29101,
"s": 29096,
"text": "html"
},
{
"code": "<!DOCTYPE html><html> <head> <style> .container { display: grid; grid-template-columns: 1fr 1fr 2fr 2fr; grid-template-rows: 100px 150px 200px 200px; grid-gap: 10px; } .container div { border: 3px black; border-radius: 7px; background-color: yellowgreen; padding: 1em; text-align: center; color: darkgreen; } h1 { color: green; text-align: center; </style></head> <body> <h1>GeeksforGeeks</h1> <div class=\"container\"> <div>geeksforgeeks 1</div> <div>geeksforgeeks 2</div> <div>geeksforgeeks 3</div> <div>geeksforgeeks 4</div> <div>geeksforgeeks 5</div> <div>geeksforgeeks 6</div> <div>geeksforgeeks 7</div> <div>geeksforgeeks 8</div> <div>geeksforgeeks 9</div> <div>geeksforgeeks 10</div> <div>geeksforgeeks 11</div> <div>geeksforgeeks 12</div> <div>geeksforgeeks 13</div> <div>geeksforgeeks 14</div> <div>geeksforgeeks 15</div> <div>geeksforgeeks 16</div> </div></body> </html>",
"e": 30115,
"s": 29101,
"text": null
},
{
"code": null,
"e": 30124,
"s": 30115,
"text": "output 2"
},
{
"code": null,
"e": 30275,
"s": 30124,
"text": "We have 4 columns, the first two columns take up the same amount of space i.e. 1fr and the last two columns take up the same amount of space i.e. 2fr."
},
{
"code": null,
"e": 30363,
"s": 30275,
"text": "Example 2. This example illustrates the use of fr unit with repeat() and auto notation."
},
{
"code": null,
"e": 30368,
"s": 30363,
"text": "html"
},
{
"code": "<!DOCTYPE html><html> <head> <style> .container { display: grid; grid-template-columns: repeat(2, 1fr) repeat(2, 2fr); grid-template-rows: auto; grid-gap: 10px; } .container div { border: 3px black; border-radius: 7px; background-color: yellowgreen; padding: 1em; text-align: center; color: darkgreen; } /* Designing h1 element */ h1 { color: green; text-align: center; </style></head> <body> <h1>GeeksforGeeks</h1> <div class=\"container\"> <div>geeksforgeeks 1</div> <div>geeksforgeeks 2</div> <div>geeksforgeeks 3</div> <div>geeksforgeeks 4</div> <div>geeksforgeeks 5</div> <div>geeksforgeeks 6</div> <div>geeksforgeeks 7</div> <div>geeksforgeeks 8</div> <div>geeksforgeeks 9</div> <div>geeksforgeeks 10</div> <div>geeksforgeeks 11</div> <div>geeksforgeeks 12</div> <div>geeksforgeeks 13</div> <div>geeksforgeeks 14</div> <div>geeksforgeeks 15</div> <div>geeksforgeeks 16</div> </div></body> </html>",
"e": 31408,
"s": 30368,
"text": null
},
{
"code": null,
"e": 31466,
"s": 31408,
"text": "repeat(number of columns/rows, the column width we want);"
},
{
"code": null,
"e": 31475,
"s": 31466,
"text": "output 3"
},
{
"code": null,
"e": 31495,
"s": 31475,
"text": "Supported Browsers:"
},
{
"code": null,
"e": 31509,
"s": 31495,
"text": "Google Chrome"
},
{
"code": null,
"e": 31527,
"s": 31509,
"text": "Internet Explorer"
},
{
"code": null,
"e": 31535,
"s": 31527,
"text": "Firefox"
},
{
"code": null,
"e": 31541,
"s": 31535,
"text": "Opera"
},
{
"code": null,
"e": 31548,
"s": 31541,
"text": "Safari"
},
{
"code": null,
"e": 31560,
"s": 31548,
"text": "ysachin2314"
},
{
"code": null,
"e": 31571,
"s": 31560,
"text": "CSS-Basics"
},
{
"code": null,
"e": 31575,
"s": 31571,
"text": "CSS"
},
{
"code": null,
"e": 31592,
"s": 31575,
"text": "Web Technologies"
},
{
"code": null,
"e": 31690,
"s": 31592,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 31745,
"s": 31690,
"text": "How to apply style to parent if it has child with CSS?"
},
{
"code": null,
"e": 31782,
"s": 31745,
"text": "Types of CSS (Cascading Style Sheet)"
},
{
"code": null,
"e": 31846,
"s": 31782,
"text": "How to position a div at the bottom of its container using CSS?"
},
{
"code": null,
"e": 31883,
"s": 31846,
"text": "Design a web page using HTML and CSS"
},
{
"code": null,
"e": 31944,
"s": 31883,
"text": "How to Upload Image into Database and Display it using PHP ?"
},
{
"code": null,
"e": 31984,
"s": 31944,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 32017,
"s": 31984,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 32062,
"s": 32017,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 32105,
"s": 32062,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
How to create dynamic autocomplete search using Bootstrap Typeahead ? - GeeksforGeeks
|
14 Jul, 2021
In this article, we will learn to implement dynamic autocomplete search using Bootstrap Typeahead. Bootstrap Typeahead is a plugin that helps to add a beautiful autocomplete option in the search bar.
In this approach, we will be taking static data for autocomplete, but we can also use dynamic JSON data as well, to show search options. The methods used in the Typeahead plugin are as follows.
.typeahead(options): Initializes an input with a typeahead.
.lookup: To trigger the lookup function externally.
.getActive: To get the currently active item, you will get a string or a JSON object depending on how you initialized typeahead. It works only for the first match.
Example: The following example demonstrates the dynamic autocomplete search using Bootstrap Typeahead.
HTML
<!DOCTYPE html><html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content= "width=device-width, initial-scale=1.0" /> <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"> </script> <script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-3-typeahead/4.0.2/bootstrap3-typeahead.min.js"> </script> <link href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" rel="stylesheet" /> <style> .typeahead { width: 50%; top: 60px !important; left: 50px !important; } </style></head> <body style="text-align: center"> <div> <b><p>Suggest the states of India</p></b> <input type="text" class="typeahead" data-provide="typeahead" placeholder="Enter name of states of India " /> </div> <script> // Initializes input( name of states) // with a typeahead var $input = $(".typeahead"); $input.typeahead({ source: [ "Andhra Pradesh", "Arunachal Pradesh", "Assam", "Bihar", "Chhattisgarh", "Goa", "Gujarat", "Haryana", "Himachal Pradesh", "Jharkhand", "Karnataka", "Kerala", "Madhya Pradesh", "Maharashtra", "Manipur", "Meghalaya", "Mizoram", "Nagaland", "Odisha", "Punjab", "Rajasthan", "Sikkim", "Tamil Nadu", "Telangana", "Tripura", "Uttar Pradesh", "Uttarakhand", "West Bengal", ], autoSelect: true, }); $input.change(function () { var current = $input.typeahead("getActive"); matches = []; if (current) { // Some item from your input matches // with entered data if (current.name == $input.val()) { matches.push(current.name); } } }); </script></body> </html>
Output:
dynamic autocomplete typeahead
Bootstrap-Questions
Picked
Bootstrap
Web Technologies
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Show Images on Click using HTML ?
How to set Bootstrap Timepicker using datetimepicker library ?
How to Use Bootstrap with React?
How to keep gap between columns using Bootstrap?
Tailwind CSS vs Bootstrap
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
How to insert spaces/tabs in text using HTML/CSS?
|
[
{
"code": null,
"e": 26865,
"s": 26837,
"text": "\n14 Jul, 2021"
},
{
"code": null,
"e": 27066,
"s": 26865,
"text": "In this article, we will learn to implement dynamic autocomplete search using Bootstrap Typeahead. Bootstrap Typeahead is a plugin that helps to add a beautiful autocomplete option in the search bar. "
},
{
"code": null,
"e": 27260,
"s": 27066,
"text": "In this approach, we will be taking static data for autocomplete, but we can also use dynamic JSON data as well, to show search options. The methods used in the Typeahead plugin are as follows."
},
{
"code": null,
"e": 27320,
"s": 27260,
"text": ".typeahead(options): Initializes an input with a typeahead."
},
{
"code": null,
"e": 27372,
"s": 27320,
"text": ".lookup: To trigger the lookup function externally."
},
{
"code": null,
"e": 27536,
"s": 27372,
"text": ".getActive: To get the currently active item, you will get a string or a JSON object depending on how you initialized typeahead. It works only for the first match."
},
{
"code": null,
"e": 27638,
"s": 27536,
"text": "Example: The following example demonstrates the dynamic autocomplete search using Bootstrap Typehead."
},
{
"code": null,
"e": 27643,
"s": 27638,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\" /> <meta name=\"viewport\" content= \"width=device-width, initial-scale=1.0\" /> <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js\"> </script> <script src=\"https://cdnjs.cloudflare.com/ajax/libs/bootstrap-3-typeahead/4.0.2/bootstrap3-typeahead.min.js\"> </script> <link href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css\" rel=\"stylesheet\" /> <style> .typeahead { width: 50%; top: 60px !important; left: 50px !important; } </style></head> <body style=\"text-align: center\"> <div> <b><p>Suggest the states of India</p></b> <input type=\"text\" class=\"typeahead\" data-provide=\"typeahead\" placeholder=\"Enter name of states of India \" /> </div> <script> // Initializes input( name of states) // with a typeahead var $input = $(\".typeahead\"); $input.typeahead({ source: [ \"Andhra Pradesh\", \"Arunachal Pradesh\", \"Assam\", \"Bihar\", \"Chhattisgarh\", \"Goa\", \"Gujarat\", \"Haryana\", \"Himachal Pradesh\", \"Jharkhand\", \"Karnataka\", \"Kerala\", \"Madhya Pradesh\", \"Maharashtra\", \"Manipur\", \"Meghalaya\", \"Mizoram\", \"Nagaland\", \"Odisha\", \"Punjab\", \"Rajasthan\", \"Sikkim\", \"Tamil Nadu\", \"Telangana\", \"Tripura\", \"Uttar Pradesh\", \"Uttarakhand\", \"West Bengal\", ], autoSelect: true, }); $input.change(function () { var current = $input.typeahead(\"getActive\"); matches = []; if (current) { // Some item from your input matches // with entered data if (current.name == $input.val()) { matches.push(current.name); } } }); </script></body> </html>",
"e": 29960,
"s": 27643,
"text": null
},
{
"code": null,
"e": 29968,
"s": 29960,
"text": "Output:"
},
{
"code": null,
"e": 29996,
"s": 29968,
"text": "dynamix automatic typeahead"
},
{
"code": null,
"e": 30016,
"s": 29996,
"text": "Bootstrap-Questions"
},
{
"code": null,
"e": 30023,
"s": 30016,
"text": "Picked"
},
{
"code": null,
"e": 30033,
"s": 30023,
"text": "Bootstrap"
},
{
"code": null,
"e": 30050,
"s": 30033,
"text": "Web Technologies"
},
{
"code": null,
"e": 30148,
"s": 30050,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30189,
"s": 30148,
"text": "How to Show Images on Click using HTML ?"
},
{
"code": null,
"e": 30252,
"s": 30189,
"text": "How to set Bootstrap Timepicker using datetimepicker library ?"
},
{
"code": null,
"e": 30285,
"s": 30252,
"text": "How to Use Bootstrap with React?"
},
{
"code": null,
"e": 30334,
"s": 30285,
"text": "How to keep gap between columns using Bootstrap?"
},
{
"code": null,
"e": 30360,
"s": 30334,
"text": "Tailwind CSS vs Bootstrap"
},
{
"code": null,
"e": 30400,
"s": 30360,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 30433,
"s": 30400,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 30478,
"s": 30433,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 30521,
"s": 30478,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
How to write your own header file in C? - GeeksforGeeks
|
24 Oct, 2017
As we all know, files with the .h extension are called header files in C. These header files generally contain function declarations which can be used in our main C program; for example, we need to include stdio.h in our C program in order to use the function printf(). So the question arises: is it possible to create your own header file?
The answer to the above is yes. Header files are simply files in which you can declare your own functions that you can use in your main program, or these can be used while writing large C programs. NOTE: Header files generally contain definitions of data types, function prototypes and C preprocessor commands.
Below is the short example of creating your own header file and using it accordingly.
Creating myhead.h : Write the below code and then save the file as myhead.h or you can give any name but the extension should be .h indicating its a header file.// It is not recommended to put function definitions // in a header file. Ideally there should be only// function declarations. Purpose of this code is// to only demonstrate working of header files.void add(int a, int b){ printf("Added value=%d\n", a + b);}void multiply(int a, int b){ printf("Multiplied value=%d\n", a * b);}Including the .h file in other program : Now as we need to include stdio.h as #include in order to use printf() function. We will also need to include the above header file myhead.h as #includeβmyhead.hβ. The β β here are used to instructs the preprocessor to look into the present folder and into the standard folder of all header files if not found in present folder. So, if you wish to use angular brackets instead of β β to include your header file you can save it in the standard folder of header files otherwise. If you are using β β you need to ensure that the header file you created is saved in the same folder in which you will save the C file using this header file.Using the created header file :// C program to use the above created header file#include <stdio.h>#include "myhead.h"int main(){ add(4, 6); /*This calls add function written in myhead.h and therefore no compilation error.*/ multiply(5, 5); // Same for the multiply function in myhead.h printf("BYE!See you Soon"); return 0;}Output:Added value:10
Multiplied value:25
BYE!See you Soon
NOTE : The above code compiles successfully and prints the above output only if you have created the header file and saved it in the same folder the above c file is saved.
Creating myhead.h : Write the below code and then save the file as myhead.h or you can give any name but the extension should be .h indicating its a header file.// It is not recommended to put function definitions // in a header file. Ideally there should be only// function declarations. Purpose of this code is// to only demonstrate working of header files.void add(int a, int b){ printf("Added value=%d\n", a + b);}void multiply(int a, int b){ printf("Multiplied value=%d\n", a * b);}
// It is not recommended to put function definitions // in a header file. Ideally there should be only// function declarations. Purpose of this code is// to only demonstrate working of header files.void add(int a, int b){ printf("Added value=%d\n", a + b);}void multiply(int a, int b){ printf("Multiplied value=%d\n", a * b);}
Including the .h file in another program: Just as we include stdio.h as #include <stdio.h> in order to use the printf() function, we will also need to include the above header file myhead.h as #include "myhead.h". The double quotes " " here instruct the preprocessor to look into the present folder first, and into the standard folder of all header files if it is not found in the present folder. So, if you wish to use angular brackets < > instead of " " to include your header file, you must save it in the standard folder of header files. If you are using " ", you need to ensure that the header file you created is saved in the same folder in which you will save the C file using this header file.
Using the created header file :// C program to use the above created header file#include <stdio.h>#include "myhead.h"int main(){ add(4, 6); /*This calls add function written in myhead.h and therefore no compilation error.*/ multiply(5, 5); // Same for the multiply function in myhead.h printf("BYE!See you Soon"); return 0;}Output:Added value:10
Multiplied value:25
BYE!See you Soon
NOTE : The above code compiles successfully and prints the above output only if you have created the header file and saved it in the same folder the above c file is saved.
// C program to use the above created header file#include <stdio.h>#include "myhead.h"int main(){ add(4, 6); /*This calls add function written in myhead.h and therefore no compilation error.*/ multiply(5, 5); // Same for the multiply function in myhead.h printf("BYE!See you Soon"); return 0;}
Output:
Added value:10
Multiplied value:25
BYE!See you Soon
NOTE : The above code compiles successfully and prints the above output only if you have created the header file and saved it in the same folder the above c file is saved.
Important Points:The creation of header files are needed generally while writing large C programs so that the modules can share the function definitions, prototypes etc.
Function and type declarations, global variables, structure declarations and in some cases, inline functions; definitions which need to be centralized in one file.
In a header file, do not use redundant or other header files; only minimal set of statements.
Donβt put function definitions in a header. Put these things in a separate .c file.
Include Declarations for functions and variables whose definitions will be visible to the linker. Also, definitions of data structures and enumerations that are shared among multiple source files.
In short, Put only what is necessary and keep the header file concised.
This article is merely to give you idea about the creation of header files and using the same but this is not what actually happens when you write a large C program. The creation of header files are needed generally while writing large C programs so that the modules can share the function definitions, prototypes etc.
This article is contributed by Dimpy Varshni. If you like GeeksforGeeks and would like to contribute, you can also write an article using contribute.geeksforgeeks.org or mail your article to contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.
Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
File Handling
C Language
File Handling
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Substring in C++
Multidimensional Arrays in C / C++
Left Shift and Right Shift Operators in C/C++
Converting Strings to Numbers in C/C++
Core Dump (Segmentation fault) in C/C++
rand() and srand() in C/C++
std::string class in C++
fork() in C
Command line arguments in C/C++
Enumeration (or enum) in C
|
[
{
"code": null,
"e": 25645,
"s": 25617,
"text": "\n24 Oct, 2017"
},
{
"code": null,
"e": 25998,
"s": 25645,
"text": "As we all know that files with .h extension are called header files in C. These header files generally contain function declarations which we can be used in our main C program, like for e.g. there is need to include stdio.h in our C program to use function printf() in the program. So the question arises, is it possible to create your own header file?"
},
{
"code": null,
"e": 26306,
"s": 25998,
"text": "The answer to the above is yes. header files are simply files in which you can declare your own functions that you can use in your main program or these can be used while writing large C programs.NOTE:Header files generally contain definitions of data types, function prototypes and C preprocessor commands."
},
{
"code": null,
"e": 26392,
"s": 26306,
"text": "Below is the short example of creating your own header file and using it accordingly."
},
{
"code": null,
"e": 28146,
"s": 26392,
"text": "Creating myhead.h : Write the below code and then save the file as myhead.h or you can give any name but the extension should be .h indicating its a header file.// It is not recommended to put function definitions // in a header file. Ideally there should be only// function declarations. Purpose of this code is// to only demonstrate working of header files.void add(int a, int b){ printf(\"Added value=%d\\n\", a + b);}void multiply(int a, int b){ printf(\"Multiplied value=%d\\n\", a * b);}Including the .h file in other program : Now as we need to include stdio.h as #include in order to use printf() function. We will also need to include the above header file myhead.h as #includeβmyhead.hβ. The β β here are used to instructs the preprocessor to look into the present folder and into the standard folder of all header files if not found in present folder. So, if you wish to use angular brackets instead of β β to include your header file you can save it in the standard folder of header files otherwise. If you are using β β you need to ensure that the header file you created is saved in the same folder in which you will save the C file using this header file.Using the created header file :// C program to use the above created header file#include <stdio.h>#include \"myhead.h\"int main(){ add(4, 6); /*This calls add function written in myhead.h and therefore no compilation error.*/ multiply(5, 5); // Same for the multiply function in myhead.h printf(\"BYE!See you Soon\"); return 0;}Output:Added value:10\nMultiplied value:25\nBYE!See you Soon\nNOTE : The above code compiles successfully and prints the above output only if you have created the header file and saved it in the same folder the above c file is saved."
},
{
"code": null,
"e": 28640,
"s": 28146,
"text": "Creating myhead.h : Write the below code and then save the file as myhead.h or you can give any name but the extension should be .h indicating its a header file.// It is not recommended to put function definitions // in a header file. Ideally there should be only// function declarations. Purpose of this code is// to only demonstrate working of header files.void add(int a, int b){ printf(\"Added value=%d\\n\", a + b);}void multiply(int a, int b){ printf(\"Multiplied value=%d\\n\", a * b);}"
},
{
"code": "// It is not recommended to put function definitions // in a header file. Ideally there should be only// function declarations. Purpose of this code is// to only demonstrate working of header files.void add(int a, int b){ printf(\"Added value=%d\\n\", a + b);}void multiply(int a, int b){ printf(\"Multiplied value=%d\\n\", a * b);}",
"e": 28973,
"s": 28640,
"text": null
},
{
"code": null,
"e": 29651,
"s": 28973,
"text": "Including the .h file in other program : Now as we need to include stdio.h as #include in order to use printf() function. We will also need to include the above header file myhead.h as #includeβmyhead.hβ. The β β here are used to instructs the preprocessor to look into the present folder and into the standard folder of all header files if not found in present folder. So, if you wish to use angular brackets instead of β β to include your header file you can save it in the standard folder of header files otherwise. If you are using β β you need to ensure that the header file you created is saved in the same folder in which you will save the C file using this header file."
},
{
"code": null,
"e": 30235,
"s": 29651,
"text": "Using the created header file :// C program to use the above created header file#include <stdio.h>#include \"myhead.h\"int main(){ add(4, 6); /*This calls add function written in myhead.h and therefore no compilation error.*/ multiply(5, 5); // Same for the multiply function in myhead.h printf(\"BYE!See you Soon\"); return 0;}Output:Added value:10\nMultiplied value:25\nBYE!See you Soon\nNOTE : The above code compiles successfully and prints the above output only if you have created the header file and saved it in the same folder the above c file is saved."
},
{
"code": "// C program to use the above created header file#include <stdio.h>#include \"myhead.h\"int main(){ add(4, 6); /*This calls add function written in myhead.h and therefore no compilation error.*/ multiply(5, 5); // Same for the multiply function in myhead.h printf(\"BYE!See you Soon\"); return 0;}",
"e": 30558,
"s": 30235,
"text": null
},
{
"code": null,
"e": 30566,
"s": 30558,
"text": "Output:"
},
{
"code": null,
"e": 30619,
"s": 30566,
"text": "Added value:10\nMultiplied value:25\nBYE!See you Soon\n"
},
{
"code": null,
"e": 30791,
"s": 30619,
"text": "NOTE : The above code compiles successfully and prints the above output only if you have created the header file and saved it in the same folder the above c file is saved."
},
{
"code": null,
"e": 30961,
"s": 30791,
"text": "Important Points:The creation of header files are needed generally while writing large C programs so that the modules can share the function definitions, prototypes etc."
},
{
"code": null,
"e": 31125,
"s": 30961,
"text": "Function and type declarations, global variables, structure declarations and in some cases, inline functions; definitions which need to be centralized in one file."
},
{
"code": null,
"e": 31219,
"s": 31125,
"text": "In a header file, do not use redundant or other header files; only minimal set of statements."
},
{
"code": null,
"e": 31303,
"s": 31219,
"text": "Donβt put function definitions in a header. Put these things in a separate .c file."
},
{
"code": null,
"e": 31500,
"s": 31303,
"text": "Include Declarations for functions and variables whose definitions will be visible to the linker. Also, definitions of data structures and enumerations that are shared among multiple source files."
},
{
"code": null,
"e": 31572,
"s": 31500,
"text": "In short, Put only what is necessary and keep the header file concised."
},
{
"code": null,
"e": 31891,
"s": 31572,
"text": "This article is merely to give you idea about the creation of header files and using the same but this is not what actually happens when you write a large C program. The creation of header files are needed generally while writing large C programs so that the modules can share the function definitions, prototypes etc."
},
{
"code": null,
"e": 32192,
"s": 31891,
"text": "This article is contributed by Dimpy Varshni. If you like GeeksforGeeks and would like to contribute, you can also write an article using contribute.geeksforgeeks.org or mail your article to contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks."
},
{
"code": null,
"e": 32317,
"s": 32192,
"text": "Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above."
},
{
"code": null,
"e": 32331,
"s": 32317,
"text": "File Handling"
},
{
"code": null,
"e": 32342,
"s": 32331,
"text": "C Language"
},
{
"code": null,
"e": 32356,
"s": 32342,
"text": "File Handling"
},
{
"code": null,
"e": 32454,
"s": 32356,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 32471,
"s": 32454,
"text": "Substring in C++"
},
{
"code": null,
"e": 32506,
"s": 32471,
"text": "Multidimensional Arrays in C / C++"
},
{
"code": null,
"e": 32552,
"s": 32506,
"text": "Left Shift and Right Shift Operators in C/C++"
},
{
"code": null,
"e": 32591,
"s": 32552,
"text": "Converting Strings to Numbers in C/C++"
},
{
"code": null,
"e": 32631,
"s": 32591,
"text": "Core Dump (Segmentation fault) in C/C++"
},
{
"code": null,
"e": 32659,
"s": 32631,
"text": "rand() and srand() in C/C++"
},
{
"code": null,
"e": 32684,
"s": 32659,
"text": "std::string class in C++"
},
{
"code": null,
"e": 32696,
"s": 32684,
"text": "fork() in C"
},
{
"code": null,
"e": 32728,
"s": 32696,
"text": "Command line arguments in C/C++"
}
] |
NLP | Filtering Insignificant Words - GeeksforGeeks
|
26 Feb, 2019
Many of the words used in the phrase are insignificant and hold no meaning. For example β English is a subject. Here, βEnglishβ and βsubjectβ are the most significant words and βisβ, βaβ are almost useless. English subject and subject English holds the same meaning even if we remove the insignificant words β (βisβ, βaβ). Using the nltk, we can remove the insignificant words by looking at their part-of-speech tags. For that we have to decide which Part-Of-Speech tags are significant.
Code #1 : filter_insignificant() class to filter out the insignificant words
def filter_insignificant(chunk, tag_suffixes =['DT', 'CC']): good = [] for word, tag in chunk: ok = True for suffix in tag_suffixes: if tag.endswith(suffix): ok = False break if ok: good.append((word, tag)) return good
filter_insignificant() checks whether that tag ends(for each tag) with the tag_suffixes by iterating over the tagged words in the chunk. The tagged word is skipped if tag ends with any of the tag_suffixes. Else if the tag is ok, the tagged word is appended to a new good chunk that is returned.
Code #2 : Using filter_insignificant() on a phrase
from transforms import filter_insignificant print ("Significant words : \n", filter_insignificant([('the', 'DT'), ('terrible', 'JJ'), ('movie', 'NN')]))
Output :
Significant words :
[('terrible', 'JJ'), ('movie', 'NN')]
We can pass out different tag suffixes using filter_insignificant(). In the code below we are talking about pronouns and possessive words such as your, you, their and theirs are no good, but DT and CC words are ok. The tag suffixes would then be PRP and PRP$: Code #3 : Passing in our own tag suffixes using filter_insignificant()
from transforms import filter_insignificant # choosing tag_suffixesprint ("Significant words : \n", filter_insignificant([('your', 'PRP$'), ('book', 'NN'), ('is', 'VBZ'), ('great', 'JJ')], tag_suffixes = ['PRP', 'PRP$']))
Output :
Significant words :
[('book', 'NN'), ('is', 'VBZ'), ('great', 'JJ')]
Natural-language-processing
Python-nltk
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Install PIP on Windows ?
Check if element exists in list in Python
How To Convert Python Dictionary To JSON?
How to drop one or multiple columns in Pandas Dataframe
Python Classes and Objects
Python | Get unique values from a list
Python | os.path.join() method
Create a directory in Python
Defaultdict in Python
Python | Pandas dataframe.groupby()
|
[
{
"code": null,
"e": 25537,
"s": 25509,
"text": "\n26 Feb, 2019"
},
{
"code": null,
"e": 26025,
"s": 25537,
"text": "Many of the words used in the phrase are insignificant and hold no meaning. For example β English is a subject. Here, βEnglishβ and βsubjectβ are the most significant words and βisβ, βaβ are almost useless. English subject and subject English holds the same meaning even if we remove the insignificant words β (βisβ, βaβ). Using the nltk, we can remove the insignificant words by looking at their part-of-speech tags. For that we have to decide which Part-Of-Speech tags are significant."
},
{
"code": null,
"e": 26102,
"s": 26025,
"text": "Code #1 : filter_insignificant() class to filter out the insignificant words"
},
{
"code": "def filter_insignificant(chunk, tag_suffixes =['DT', 'CC']): good = [] for word, tag in chunk: ok = True for suffix in tag_suffixes: if tag.endswith(suffix): ok = False break if ok: good.append((word, tag)) return good",
"e": 26448,
"s": 26102,
"text": null
},
{
"code": null,
"e": 26743,
"s": 26448,
"text": "filter_insignificant() checks whether that tag ends(for each tag) with the tag_suffixes by iterating over the tagged words in the chunk. The tagged word is skipped if tag ends with any of the tag_suffixes. Else if the tag is ok, the tagged word is appended to a new good chunk that is returned."
},
{
"code": null,
"e": 26794,
"s": 26743,
"text": "Code #2 : Using filter_insignificant() on a phrase"
},
{
"code": "from transforms import filter_insignificant print (\"Significant words : \\n\", filter_insignificant([('the', 'DT'), ('terrible', 'JJ'), ('movie', 'NN')]))",
"e": 26984,
"s": 26794,
"text": null
},
{
"code": null,
"e": 26993,
"s": 26984,
"text": "Output :"
},
{
"code": null,
"e": 27053,
"s": 26993,
"text": "Significant words : \n[('terrible', 'JJ'), ('movie', 'NN')]\n"
},
{
"code": null,
"e": 27384,
"s": 27053,
"text": "We can pass out different tag suffixes using filter_insignificant(). In the code below we are talking about pronouns and possessive words such as your, you, their and theirs are no good, but DT and CC words are ok. The tag suffixes would then be PRP and PRP$: Code #3 : Passing in our own tag suffixes using filter_insignificant()"
},
{
"code": "from transforms import filter_insignificant # choosing tag_suffixesprint (\"Significant words : \\n\", filter_insignificant([('your', 'PRP$'), ('book', 'NN'), ('is', 'VBZ'), ('great', 'JJ')], tag_suffixes = ['PRP', 'PRP$']))",
"e": 27680,
"s": 27384,
"text": null
},
{
"code": null,
"e": 27689,
"s": 27680,
"text": "Output :"
},
{
"code": null,
"e": 27760,
"s": 27689,
"text": "Significant words : \n[('book', 'NN'), ('is', 'VBZ'), ('great', 'JJ')]\n"
},
{
"code": null,
"e": 27788,
"s": 27760,
"text": "Natural-language-processing"
},
{
"code": null,
"e": 27800,
"s": 27788,
"text": "Python-nltk"
},
{
"code": null,
"e": 27807,
"s": 27800,
"text": "Python"
},
{
"code": null,
"e": 27905,
"s": 27807,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27937,
"s": 27905,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 27979,
"s": 27937,
"text": "Check if element exists in list in Python"
},
{
"code": null,
"e": 28021,
"s": 27979,
"text": "How To Convert Python Dictionary To JSON?"
},
{
"code": null,
"e": 28077,
"s": 28021,
"text": "How to drop one or multiple columns in Pandas Dataframe"
},
{
"code": null,
"e": 28104,
"s": 28077,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 28143,
"s": 28104,
"text": "Python | Get unique values from a list"
},
{
"code": null,
"e": 28174,
"s": 28143,
"text": "Python | os.path.join() method"
},
{
"code": null,
"e": 28203,
"s": 28174,
"text": "Create a directory in Python"
},
{
"code": null,
"e": 28225,
"s": 28203,
"text": "Defaultdict in Python"
}
] |
Count of Isogram strings in given Array of Strings - GeeksforGeeks
|
16 Dec, 2021
Given an array arr[] containing N strings, the task is to find the count of strings which are isograms. A string is an isogram if no letter in that string appears more than once.
Examples:
Input: arr[] = {βabcdβ, βdergβ, βertyβ}Output: 3Explanation: All given strings are isograms. In all the strings no character is present more than once. Hence count is 3
Input: arr[] = {βagkaβ, βlkmnβ}Output: 1Explanation: Only string βlkmnβ is isogram. In the string βagkaβ the character βaβ is present twice. Hence count is 1.
Approach: Greedy approach can be used for solving this problem. Traverse each string in the given string array and check if that is isogram or not. To do that follow the steps mentioned below:
Traverse the array of string and follow the below steps for each string:
Create a frequency map of characters.
Wherever any character has a frequency greater than 1, skip the current string and move to the next one.
If no character has frequency more than 1, increment the count of answer by 1.
Return the count stored in answer when all the strings are traversed.
Below is the implementation of the above approach:
C++
Java
Python3
C#
Javascript
#include <bits/stdc++.h>using namespace std; // Function to check// if a string is an isogrambool isIsogram(string s){ // Loop to check // if string is isogram or not vector<int> freq(26, 0); for (char c : s) { freq++; if (freq > 1) { return false; } } return true;} // Function to count the number of isogramsint countIsograms(vector<string>& arr){ int ans = 0; // Loop to iterate the string array for (string x : arr) { if (isIsogram(x)) { ans++; } } return ans;} // Driver Codeint main(){ vector<string> arr = { "abcd", "derg", "erty" }; // Count of isograms in string array arr[] cout << countIsograms(arr) << endl; return 0;}
// Java program for the above approachimport java.util.ArrayList; class GFG { // Function to check // if a String is an isogram static boolean isIsogram(String s) { // Loop to check // if String is isogram or not int[] freq = new int[26]; for (int i = 0; i < 26; i++) { freq[i] = 0; } for (char c : s.toCharArray()) { freq++; if (freq > 1) { return false; } } return true; } // Function to count the number of isograms static int countIsograms(ArrayList<String> arr) { int ans = 0; // Loop to iterate the String array for (String x : arr) { if (isIsogram(x)) { ans++; } } return ans; } // Driver Code public static void main(String args[]) { ArrayList<String> arr = new ArrayList<String>(); arr.add("abcd"); arr.add("derg"); arr.add("erty"); // Count of isograms in String array arr[] System.out.println(countIsograms(arr)); }} // This code is contributed by gfgking
# Function to check# if a string is an isogramdef isIsogram(s): # Loop to check # if string is isogram or not freq = [0]*(26) for c in s: freq[ord(c) - ord('a')] += 1 if (freq[ord(c) - ord('a')] > 1): return False return True # Function to count the number of isogramsdef countIsograms(arr): ans = 0 # Loop to iterate the string array for x in arr: if (isIsogram(x)): ans += 1 return ans # Driver Codeif __name__ == "__main__": arr = ["abcd", "derg", "erty"] # Count of isograms in string array arr[] print(countIsograms(arr)) # This code is contributed by ukasp.
// C# program for the above approachusing System;using System.Collections; class GFG{ // Function to check// if a string is an isogramstatic bool isIsogram(string s){ // Loop to check // if string is isogram or not int []freq = new int[26]; for(int i = 0; i < 26; i++) { freq[i] = 0; } foreach (char c in s) { freq++; if (freq > 1) { return false; } } return true;} // Function to count the number of isogramsstatic int countIsograms(ArrayList arr){ int ans = 0; // Loop to iterate the string array foreach (string x in arr) { if (isIsogram(x)) { ans++; } } return ans;} // Driver Codepublic static void Main(){ ArrayList arr = new ArrayList(); arr.Add("abcd"); arr.Add("derg"); arr.Add("erty"); // Count of isograms in string array arr[] Console.WriteLine(countIsograms(arr));}} // This code is contributed by Samim Hossain Mondal.
<script> // Function to check // if a string is an isogram const isIsogram = (s) => { // Loop to check // if string is isogram or not let freq = new Array(26).fill(0); for (let c in s) { freq[s.charCodeAt(c) - "0".charCodeAt(0)]++; if (freq > 1) { return false; } } return true; } // Function to count the number of isograms const countIsograms = (arr) => { let ans = 0; // Loop to iterate the string array for (let x in arr) { if (isIsogram(x)) { ans++; } } return ans; } // Driver Code let arr = ["abcd", "derg", "erty"]; // Count of isograms in string array arr[] document.write(countIsograms(arr)); // This code is contributed by rakeshsahni </script>
3
Time Complexity: O(N*M), where N is the size of the array and M is the size of the longest stringAuxiliary Space: O(1)
rakeshsahni
samim2000
ukasp
gfgking
frequency-counting
Arrays
Hash
Strings
Arrays
Hash
Strings
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Program for Bridge and Torch problem
Window Sliding Technique
Binary Tree (Array implementation)
Segment Tree | Set 1 (Sum of given range)
MSD( Most Significant Digit ) Radix Sort
Internal Working of HashMap in Java
Count pairs with given sum
Hashing | Set 1 (Introduction)
Hashing | Set 3 (Open Addressing)
Hashing | Set 2 (Separate Chaining)
|
[
{
"code": null,
"e": 26041,
"s": 26013,
"text": "\n16 Dec, 2021"
},
{
"code": null,
"e": 26220,
"s": 26041,
"text": "Given an array arr[] containing N strings, the task is to find the count of strings which are isograms. A string is an isogram if no letter in that string appears more than once."
},
{
"code": null,
"e": 26230,
"s": 26220,
"text": "Examples:"
},
{
"code": null,
"e": 26399,
"s": 26230,
"text": "Input: arr[] = {βabcdβ, βdergβ, βertyβ}Output: 3Explanation: All given strings are isograms. In all the strings no character is present more than once. Hence count is 3"
},
{
"code": null,
"e": 26558,
"s": 26399,
"text": "Input: arr[] = {βagkaβ, βlkmnβ}Output: 1Explanation: Only string βlkmnβ is isogram. In the string βagkaβ the character βaβ is present twice. Hence count is 1."
},
{
"code": null,
"e": 26751,
"s": 26558,
"text": "Approach: Greedy approach can be used for solving this problem. Traverse each string in the given string array and check if that is isogram or not. To do that follow the steps mentioned below:"
},
{
"code": null,
"e": 26824,
"s": 26751,
"text": "Traverse the array of string and follow the below steps for each string:"
},
{
"code": null,
"e": 26862,
"s": 26824,
"text": "Create a frequency map of characters."
},
{
"code": null,
"e": 26967,
"s": 26862,
"text": "Wherever any character has a frequency greater than 1, skip the current string and move to the next one."
},
{
"code": null,
"e": 27046,
"s": 26967,
"text": "If no character has frequency more than 1, increment the count of answer by 1."
},
{
"code": null,
"e": 27116,
"s": 27046,
"text": "Return the count stored in answer when all the strings are traversed."
},
{
"code": null,
"e": 27167,
"s": 27116,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 27171,
"s": 27167,
"text": "C++"
},
{
"code": null,
"e": 27176,
"s": 27171,
"text": "Java"
},
{
"code": null,
"e": 27184,
"s": 27176,
"text": "Python3"
},
{
"code": null,
"e": 27187,
"s": 27184,
"text": "C#"
},
{
"code": null,
"e": 27198,
"s": 27187,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std; // Function to check// if a string is an isogrambool isIsogram(string s){ // Loop to check // if string is isogram or not vector<int> freq(26, 0); for (char c : s) { freq++; if (freq > 1) { return false; } } return true;} // Function to count the number of isogramsint countIsograms(vector<string>& arr){ int ans = 0; // Loop to iterate the string array for (string x : arr) { if (isIsogram(x)) { ans++; } } return ans;} // Driver Codeint main(){ vector<string> arr = { \"abcd\", \"derg\", \"erty\" }; // Count of isograms in string array arr[] cout << countIsograms(arr) << endl; return 0;}",
"e": 27933,
"s": 27198,
"text": null
},
{
"code": "// Java program for the above approachimport java.util.ArrayList; class GFG { // Function to check // if a String is an isogram static boolean isIsogram(String s) { // Loop to check // if String is isogram or not int[] freq = new int[26]; for (int i = 0; i < 26; i++) { freq[i] = 0; } for (char c : s.toCharArray()) { freq++; if (freq > 1) { return false; } } return true; } // Function to count the number of isograms static int countIsograms(ArrayList<String> arr) { int ans = 0; // Loop to iterate the String array for (String x : arr) { if (isIsogram(x)) { ans++; } } return ans; } // Driver Code public static void main(String args[]) { ArrayList<String> arr = new ArrayList<String>(); arr.add(\"abcd\"); arr.add(\"derg\"); arr.add(\"erty\"); // Count of isograms in String array arr[] System.out.println(countIsograms(arr)); }} // This code is contributed by gfgking",
"e": 29067,
"s": 27933,
"text": null
},
{
"code": "# Function to check# if a string is an isogramdef isIsogram(s): # Loop to check # if string is isogram or not freq = [0]*(26) for c in s: freq[ord(c) - ord('a')] += 1 if (freq[ord(c) - ord('a')] > 1): return False return True # Function to count the number of isogramsdef countIsograms(arr): ans = 0 # Loop to iterate the string array for x in arr: if (isIsogram(x)): ans += 1 return ans # Driver Codeif __name__ == \"__main__\": arr = [\"abcd\", \"derg\", \"erty\"] # Count of isograms in string array arr[] print(countIsograms(arr)) # This code is contributed by ukasp.",
"e": 29720,
"s": 29067,
"text": null
},
{
"code": "// C# program for the above approachusing System;using System.Collections; class GFG{ // Function to check// if a string is an isogramstatic bool isIsogram(string s){ // Loop to check // if string is isogram or not int []freq = new int[26]; for(int i = 0; i < 26; i++) { freq[i] = 0; } foreach (char c in s) { freq++; if (freq > 1) { return false; } } return true;} // Function to count the number of isogramsstatic int countIsograms(ArrayList arr){ int ans = 0; // Loop to iterate the string array foreach (string x in arr) { if (isIsogram(x)) { ans++; } } return ans;} // Driver Codepublic static void Main(){ ArrayList arr = new ArrayList(); arr.Add(\"abcd\"); arr.Add(\"derg\"); arr.Add(\"erty\"); // Count of isograms in string array arr[] Console.WriteLine(countIsograms(arr));}} // This code is contributed by Samim Hossain Mondal.",
"e": 30693,
"s": 29720,
"text": null
},
{
"code": "<script> // Function to check // if a string is an isogram const isIsogram = (s) => { // Loop to check // if string is isogram or not let freq = new Array(26).fill(0); for (let c in s) { freq[s.charCodeAt(c) - \"0\".charCodeAt(0)]++; if (freq > 1) { return false; } } return true; } // Function to count the number of isograms const countIsograms = (arr) => { let ans = 0; // Loop to iterate the string array for (let x in arr) { if (isIsogram(x)) { ans++; } } return ans; } // Driver Code let arr = [\"abcd\", \"derg\", \"erty\"]; // Count of isograms in string array arr[] document.write(countIsograms(arr)); // This code is contributed by rakeshsahni </script>",
"e": 31558,
"s": 30693,
"text": null
},
{
"code": null,
"e": 31563,
"s": 31561,
"text": "3"
},
{
"code": null,
"e": 31684,
"s": 31565,
"text": "Time Complexity: O(N*M), where N is the size of the array and M is the size of the longest stringAuxiliary Space: O(1)"
},
{
"code": null,
"e": 31698,
"s": 31686,
"text": "rakeshsahni"
},
{
"code": null,
"e": 31708,
"s": 31698,
"text": "samim2000"
},
{
"code": null,
"e": 31714,
"s": 31708,
"text": "ukasp"
},
{
"code": null,
"e": 31722,
"s": 31714,
"text": "gfgking"
},
{
"code": null,
"e": 31741,
"s": 31722,
"text": "frequency-counting"
},
{
"code": null,
"e": 31748,
"s": 31741,
"text": "Arrays"
},
{
"code": null,
"e": 31753,
"s": 31748,
"text": "Hash"
},
{
"code": null,
"e": 31761,
"s": 31753,
"text": "Strings"
},
{
"code": null,
"e": 31768,
"s": 31761,
"text": "Arrays"
},
{
"code": null,
"e": 31773,
"s": 31768,
"text": "Hash"
},
{
"code": null,
"e": 31781,
"s": 31773,
"text": "Strings"
},
{
"code": null,
"e": 31879,
"s": 31781,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 31916,
"s": 31879,
"text": "Program for Bridge and Torch problem"
},
{
"code": null,
"e": 31941,
"s": 31916,
"text": "Window Sliding Technique"
},
{
"code": null,
"e": 31976,
"s": 31941,
"text": "Binary Tree (Array implementation)"
},
{
"code": null,
"e": 32018,
"s": 31976,
"text": "Segment Tree | Set 1 (Sum of given range)"
},
{
"code": null,
"e": 32059,
"s": 32018,
"text": "MSD( Most Significant Digit ) Radix Sort"
},
{
"code": null,
"e": 32095,
"s": 32059,
"text": "Internal Working of HashMap in Java"
},
{
"code": null,
"e": 32122,
"s": 32095,
"text": "Count pairs with given sum"
},
{
"code": null,
"e": 32153,
"s": 32122,
"text": "Hashing | Set 1 (Introduction)"
},
{
"code": null,
"e": 32187,
"s": 32153,
"text": "Hashing | Set 3 (Open Addressing)"
}
] |
Check whether two straight lines are orthogonal or not
|
23 Jun, 2022
Given two line segments AB and CD having A(x1, y1), B(x2, y2), C(x3, y3) and D(x4, y4). The task is to check whether these two lines are orthogonal or not. Two lines are called orthogonal if they are perpendicular at the point of intersection.
Examples:
Input: x1 = 0, y1 = 3, x2 = 0, y2 = -5
x3 = 2, y3 = 0, x4 = -1, y4 = 0
Output: Yes
Input: x1 = 0, y1 = 4, x2 = 0, y2 = -9
x3 = 2, y3 = 0, x4 = -1, y4 = 0
Output: Yes
Approach: If the slopes of the two lines are m1 and m2 then for them to be orthogonal we need to check if:
Both lines have infinite slope then answer is no.
One line has infinite slope and if other line has 0 slope then answer is yes otherwise no.
Both lines have finite slope and their product is -1 then the answer is yes.
Below is the implementation of the above approach:
C++
Java
Python3
C#
PHP
Javascript
// C++ implementation of above approach#include <bits/stdc++.h>using namespace std; // Function to check if two straight// lines are orthogonal or notbool checkOrtho(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4){ int m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = (y4 - y3) / (x4 - x3); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = (y2 - y1) / (x2 - x1); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = (y2 - y1) / (x2 - x1); m2 = (y4 - y3) / (x4 - x3); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; }} // Driver codeint main(){ int x1 = 0, y1 = 4, x2 = 0, y2 = -9; int x3 = 2, y3 = 0, x4 = -1, y4 = 0; checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) ? cout << "Yes" : cout << "No"; return 0;}
//Java implementation of above approach import java.io.*; class GFG { // Function to check if two straight // lines are orthogonal or not static boolean checkOrtho(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4) { int m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = (y4 - y3) / (x4 - x3); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = (y2 - y1) / (x2 - x1); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = (y2 - y1) / (x2 - x1); m2 = (y4 - y3) / (x4 - x3); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; } } // Driver code public static void main (String[] args) { int x1 = 0, y1 = 4, x2 = 0, y2 = -9; int x3 = 2, y3 = 0, x4 = -1, y4 = 0; if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4)==true) System.out.println ("Yes"); else System.out.println("No" ); }} //This code is contributed by akt_mit..
# Python 3 implementation of above approach # Function to check if two straight# lines are orthogonal or notdef checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4): # Both lines have infinite slope if (x2 - x1 == 0 and x4 - x3 == 0): return False # Only line 1 has infinite slope elif (x2 - x1 == 0): m2 = (y4 - y3) / (x4 - x3) if (m2 == 0): return True else: return False # Only line 2 has infinite slope elif (x4 - x3 == 0): m1 = (y2 - y1) / (x2 - x1); if (m1 == 0): return True else: return False else: # Find slopes of the lines m1 = (y2 - y1) / (x2 - x1) m2 = (y4 - y3) / (x4 - x3) # Check if their product is -1 if (m1 * m2 == -1): return True else: return False # Driver codeif __name__ == '__main__': x1 = 0 y1 = 4 x2 = 0 y2 = -9 x3 = 2 y3 = 0 x4 = -1 y4 = 0 if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4)): print("Yes") else: print("No") # This code is contributed by# Shashank_Sharma
// C# implementation of above approachusing System; class GFG{ // Function to check if two straight // lines are orthogonal or not static bool checkOrtho(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4) { int m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = (y4 - y3) / (x4 - x3); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = (y2 - y1) / (x2 - x1); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = (y2 - y1) / (x2 - x1); m2 = (y4 - y3) / (x4 - x3); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; } } // Driver code public static void Main () { int x1 = 0, y1 = 4, x2 = 0, y2 = -9; int x3 = 2, y3 = 0, x4 = -1, y4 = 0; if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) == true) Console.WriteLine("Yes"); else Console.WriteLine("No" ); }} // This code is contributed by Ryuga
<?php// PHP implementation of above approach // Function to check if two straight// lines are orthogonal or notfunction checkOrtho($x1, $y1, $x2, $y2, $x3, $y3, $x4, $y4){ // Both lines have infinite slope if ($x2 - $x1 == 0 && $x4 - $x3 == 0) return false; // Only line 1 has infinite slope else if ($x2 - $x1 == 0) { $m2 = (int)(($y4 - $y3) / ($x4 - $x3)); if ($m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if ($x4 - $x3 == 0) { $m1 = (int)(($y2 - $y1) / ($x2 - $x1)); if ($m1 == 0) return true; else return false; } else { // Find slopes of the lines $m1 = (int)(($y2 - $y1) / ($x2 - $x1)); $m2 = (int)(($y4 - $y3) / ($x4 - $x3)); // Check if their product is -1 if ($m1 * $m2 == -1) return true; else return false; }} // Driver code$x1 = 0; $y1 = 4;$x2 = 0; $y2 = -9;$x3 = 2; $y3 = 0;$x4 = -1; $y4 = 0; if(checkOrtho($x1, $y1, $x2, $y2, $x3, $y3, $x4, $y4)) print("Yes");else print("No"); // This code is contributed by chandan_jnu?>
<script> // Javascript implementation of above approach // Function to check if two straight // lines are orthogonal or not function checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) { let m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = parseInt((y4 - y3) / (x4 - x3), 10); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = parseInt((y2 - y1) / (x2 - x1), 10); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = parseInt((y2 - y1) / (x2 - x1), 10); m2 = parseInt((y4 - y3) / (x4 - x3), 10); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; } } let x1 = 0, y1 = 4, x2 = 0, y2 = -9; let x3 = 2, y3 = 0, x4 = -1, y4 = 0; if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) == true) document.write("Yes"); else document.write("No" ); </script>
Yes
Time Complexity: O(1)
Auxiliary Space: O(1)
jit_t
Shashank_Sharma
ankthon
Chandan_Kumar
divyeshrabadiya07
sushmitamittal1329
Geometric-Lines
Geometric
Mathematical
Mathematical
Geometric
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 53,
"s": 25,
"text": "\n23 Jun, 2022"
},
{
"code": null,
"e": 298,
"s": 53,
"text": "Given two line segments AB and CD having A(x1, y1), B(x2, y2), C(x3, y3) and D(x4, y4). The task is to check whether these two lines are orthogonal or not. Two lines are called orthogonal if they are perpendicular at the point of intersection. "
},
{
"code": null,
"e": 310,
"s": 298,
"text": "Examples: "
},
{
"code": null,
"e": 494,
"s": 310,
"text": "Input: x1 = 0, y1 = 3, x2 = 0, y2 = -5\n x3 = 2, y3 = 0, x4 = -1, y4 = 0\nOutput: Yes\n\nInput: x1 = 0, y1 = 4, x2 = 0, y2 = -9\n x3 = 2, y3 = 0, x4 = -1, y4 = 0\nOutput: Yes"
},
{
"code": null,
"e": 605,
"s": 496,
"text": "Approach: If the slopes of the two lines are m1 and m2 then for them to be orthogonal we need to check if: "
},
{
"code": null,
"e": 655,
"s": 605,
"text": "Both lines have infinite slope then answer is no."
},
{
"code": null,
"e": 746,
"s": 655,
"text": "One line has infinite slope and if other line has 0 slope then answer is yes otherwise no."
},
{
"code": null,
"e": 823,
"s": 746,
"text": "Both lines have finite slope and their product is -1 then the answer is yes."
},
{
"code": null,
"e": 875,
"s": 823,
"text": "Below is the implementation of the above approach: "
},
{
"code": null,
"e": 879,
"s": 875,
"text": "C++"
},
{
"code": null,
"e": 884,
"s": 879,
"text": "Java"
},
{
"code": null,
"e": 892,
"s": 884,
"text": "Python3"
},
{
"code": null,
"e": 895,
"s": 892,
"text": "C#"
},
{
"code": null,
"e": 899,
"s": 895,
"text": "PHP"
},
{
"code": null,
"e": 910,
"s": 899,
"text": "Javascript"
},
{
"code": "// C++ implementation of above approach#include <bits/stdc++.h>using namespace std; // Function to check if two straight// lines are orthogonal or notbool checkOrtho(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4){ int m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = (y4 - y3) / (x4 - x3); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = (y2 - y1) / (x2 - x1); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = (y2 - y1) / (x2 - x1); m2 = (y4 - y3) / (x4 - x3); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; }} // Driver codeint main(){ int x1 = 0, y1 = 4, x2 = 0, y2 = -9; int x3 = 2, y3 = 0, x4 = -1, y4 = 0; checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) ? cout << \"Yes\" : cout << \"No\"; return 0;}",
"e": 2141,
"s": 910,
"text": null
},
{
"code": "//Java implementation of above approach import java.io.*; class GFG { // Function to check if two straight // lines are orthogonal or not static boolean checkOrtho(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4) { int m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = (y4 - y3) / (x4 - x3); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = (y2 - y1) / (x2 - x1); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = (y2 - y1) / (x2 - x1); m2 = (y4 - y3) / (x4 - x3); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; } } // Driver code public static void main (String[] args) { int x1 = 0, y1 = 4, x2 = 0, y2 = -9; int x3 = 2, y3 = 0, x4 = -1, y4 = 0; if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4)==true) System.out.println (\"Yes\"); else System.out.println(\"No\" ); }} //This code is contributed by akt_mit..",
"e": 3643,
"s": 2141,
"text": null
},
{
"code": "# Python 3 implementation of above approach # Function to check if two straight# lines are orthogonal or notdef checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4): # Both lines have infinite slope if (x2 - x1 == 0 and x4 - x3 == 0): return False # Only line 1 has infinite slope elif (x2 - x1 == 0): m2 = (y4 - y3) / (x4 - x3) if (m2 == 0): return True else: return False # Only line 2 has infinite slope elif (x4 - x3 == 0): m1 = (y2 - y1) / (x2 - x1); if (m1 == 0): return True else: return False else: # Find slopes of the lines m1 = (y2 - y1) / (x2 - x1) m2 = (y4 - y3) / (x4 - x3) # Check if their product is -1 if (m1 * m2 == -1): return True else: return False # Driver codeif __name__ == '__main__': x1 = 0 y1 = 4 x2 = 0 y2 = -9 x3 = 2 y3 = 0 x4 = -1 y4 = 0 if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4)): print(\"Yes\") else: print(\"No\") # This code is contributed by# Shashank_Sharma",
"e": 4795,
"s": 3643,
"text": null
},
{
"code": "// C# implementation of above approachusing System; class GFG{ // Function to check if two straight // lines are orthogonal or not static bool checkOrtho(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4) { int m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = (y4 - y3) / (x4 - x3); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = (y2 - y1) / (x2 - x1); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = (y2 - y1) / (x2 - x1); m2 = (y4 - y3) / (x4 - x3); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; } } // Driver code public static void Main () { int x1 = 0, y1 = 4, x2 = 0, y2 = -9; int x3 = 2, y3 = 0, x4 = -1, y4 = 0; if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) == true) Console.WriteLine(\"Yes\"); else Console.WriteLine(\"No\" ); }} // This code is contributed by Ryuga",
"e": 6269,
"s": 4795,
"text": null
},
{
"code": "<?php// PHP implementation of above approach // Function to check if two straight// lines are orthogonal or notfunction checkOrtho($x1, $y1, $x2, $y2, $x3, $y3, $x4, $y4){ // Both lines have infinite slope if ($x2 - $x1 == 0 && $x4 - $x3 == 0) return false; // Only line 1 has infinite slope else if ($x2 - $x1 == 0) { $m2 = (int)(($y4 - $y3) / ($x4 - $x3)); if ($m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if ($x4 - $x3 == 0) { $m1 = (int)(($y2 - $y1) / ($x2 - $x1)); if ($m1 == 0) return true; else return false; } else { // Find slopes of the lines $m1 = (int)(($y2 - $y1) / ($x2 - $x1)); $m2 = (int)(($y4 - $y3) / ($x4 - $x3)); // Check if their product is -1 if ($m1 * $m2 == -1) return true; else return false; }} // Driver code$x1 = 0; $y1 = 4;$x2 = 0; $y2 = -9;$x3 = 2; $y3 = 0;$x4 = -1; $y4 = 0; if(checkOrtho($x1, $y1, $x2, $y2, $x3, $y3, $x4, $y4)) print(\"Yes\");else print(\"No\"); // This code is contributed by chandan_jnu?>",
"e": 7494,
"s": 6269,
"text": null
},
{
"code": "<script> // Javascript implementation of above approach // Function to check if two straight // lines are orthogonal or not function checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) { let m1, m2; // Both lines have infinite slope if (x2 - x1 == 0 && x4 - x3 == 0) return false; // Only line 1 has infinite slope else if (x2 - x1 == 0) { m2 = parseInt((y4 - y3) / (x4 - x3), 10); if (m2 == 0) return true; else return false; } // Only line 2 has infinite slope else if (x4 - x3 == 0) { m1 = parseInt((y2 - y1) / (x2 - x1), 10); if (m1 == 0) return true; else return false; } else { // Find slopes of the lines m1 = parseInt((y2 - y1) / (x2 - x1), 10); m2 = parseInt((y4 - y3) / (x4 - x3), 10); // Check if their product is -1 if (m1 * m2 == -1) return true; else return false; } } let x1 = 0, y1 = 4, x2 = 0, y2 = -9; let x3 = 2, y3 = 0, x4 = -1, y4 = 0; if(checkOrtho(x1, y1, x2, y2, x3, y3, x4, y4) == true) document.write(\"Yes\"); else document.write(\"No\" ); </script>",
"e": 8862,
"s": 7494,
"text": null
},
{
"code": null,
"e": 8866,
"s": 8862,
"text": "Yes"
},
{
"code": null,
"e": 8890,
"s": 8868,
"text": "Time Complexity: O(1)"
},
{
"code": null,
"e": 8912,
"s": 8890,
"text": "Auxiliary Space: O(1)"
},
{
"code": null,
"e": 8918,
"s": 8912,
"text": "jit_t"
},
{
"code": null,
"e": 8934,
"s": 8918,
"text": "Shashank_Sharma"
},
{
"code": null,
"e": 8942,
"s": 8934,
"text": "ankthon"
},
{
"code": null,
"e": 8956,
"s": 8942,
"text": "Chandan_Kumar"
},
{
"code": null,
"e": 8974,
"s": 8956,
"text": "divyeshrabadiya07"
},
{
"code": null,
"e": 8993,
"s": 8974,
"text": "sushmitamittal1329"
},
{
"code": null,
"e": 9009,
"s": 8993,
"text": "Geometric-Lines"
},
{
"code": null,
"e": 9019,
"s": 9009,
"text": "Geometric"
},
{
"code": null,
"e": 9032,
"s": 9019,
"text": "Mathematical"
},
{
"code": null,
"e": 9045,
"s": 9032,
"text": "Mathematical"
},
{
"code": null,
"e": 9055,
"s": 9045,
"text": "Geometric"
}
] |
Year() Function in MS Access
|
11 Sep, 2020
Year() : Function in Microsoft Access is used to return the year part of a given date.
Syntax :
Year (date)
Parameter : This method accepts one parameter as mentioned above and described below :
date : It is any variant, numeric expression, string expression, or any combination of these that can represent a date.
Returns : It returns the year part of a specified date. If date contains Null, Null is returned.
Example-1 : Find Year from a specified Date :
SELECT Year(#05/17/2017#);
Output :
2017
Example-2 : Find Year part of Today :
SELECT Year(Date());
Output :
2020
Example-3 : Find Year of Joining of the Employee :
SELECT Year(JoiningDate) FROM Employees;
Output :
1990
2004
1992
Note : There should be a JoiningDate column in the Employees database. Here, the Employees database contains only 3 records.
DBMS-SQL
SQL
SQL
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n11 Sep, 2020"
},
{
"code": null,
"e": 115,
"s": 28,
"text": "Year() : Function in Microsoft Access is used to return the year part of a given date."
},
{
"code": null,
"e": 124,
"s": 115,
"text": "Syntax :"
},
{
"code": null,
"e": 136,
"s": 124,
"text": "Year (date)"
},
{
"code": null,
"e": 223,
"s": 136,
"text": "Parameter : This method accepts one parameter as mentioned above and described below :"
},
{
"code": null,
"e": 343,
"s": 223,
"text": "date : It is any variant, numeric expression, string expression, or any combination of these that can represent a date."
},
{
"code": null,
"e": 441,
"s": 343,
"text": "Returns : It Returns the year part of a specified date . If date contains Null, Null is returned."
},
{
"code": null,
"e": 487,
"s": 441,
"text": "Example-1 : Find Year from a specified Date :"
},
{
"code": null,
"e": 515,
"s": 487,
"text": "SELECT Year(#05/17/2017#);\n"
},
{
"code": null,
"e": 524,
"s": 515,
"text": "Output :"
},
{
"code": null,
"e": 531,
"s": 524,
"text": "2017 \n"
},
{
"code": null,
"e": 569,
"s": 531,
"text": "Example-2 : Find Year part of Today :"
},
{
"code": null,
"e": 591,
"s": 569,
"text": "SELECT Year(Date());\n"
},
{
"code": null,
"e": 600,
"s": 591,
"text": "Output :"
},
{
"code": null,
"e": 606,
"s": 600,
"text": "2020\n"
},
{
"code": null,
"e": 657,
"s": 606,
"text": "Example-3 : Find Year of Joining of the Employee :"
},
{
"code": null,
"e": 699,
"s": 657,
"text": "SELECT Year(JoiningDate) FROM Employees;\n"
},
{
"code": null,
"e": 708,
"s": 699,
"text": "Output :"
},
{
"code": null,
"e": 724,
"s": 708,
"text": "1990\n2004\n1992\n"
},
{
"code": null,
"e": 842,
"s": 724,
"text": "Note : There Should be JoiningDate column in Employees Database.Here, the Employees Database contains only 3 records."
},
{
"code": null,
"e": 851,
"s": 842,
"text": "DBMS-SQL"
},
{
"code": null,
"e": 855,
"s": 851,
"text": "SQL"
},
{
"code": null,
"e": 859,
"s": 855,
"text": "SQL"
}
] |
How to wrap text within Tkinter Text Box?
|
26 Mar, 2021
In this article, we will see how we can wrap the text in a Tkinter text box. The textwrap module can be used for wrapping and formatting plain text. This module provides formatting of the text by adjusting the line breaks in the input paragraph.
Example 1:
Firstly We will import the Tkinter Library to the Code then we will declare it as a root for our Window then after declaring the size of the window we will call the function in the Tkinter called text() which will provide the Text box in that Window then declare the size of the Text Box then pack to the window or combined the textBox to the Window.
Below is the implementation:
Python3
# import tkinter module from tkinter import * # Create Objectroot = Tk() # Initialize tkinter window with dimensions 100x100 root.geometry('300x300') text=Text(root, width = 50, height = 50, padx = 10, pady = 10) # pack the text-Aera in the windowtext.pack() root.mainloop()
Output:
Example 2:
Here we have a problem: when we write something and reach the end of the box, the text breaks onto a new line in the middle of a word. Generally, to wrap the text by whole words we use Text(wrap=WORD). Here is the basic idea of how we use wrap=WORD.
Use the wrap=WORD option. Here's an example:
from tkinter import *
root = Tk()
t = Text(wrap=WORD)
t.pack()
root.mainloop()
Below is the implementation:
Python3
# import tkinter module from tkinter import * # Create Objectroot = Tk() # Initialize tkinter window with dimensions 100x100 root.geometry('300x300') text=Text(root, width = 50, height = 50, wrap = WORD, padx = 10, pady = 10) # pack the text-Aera in the windowtext.pack() root.mainloop()
Picked
Python-tkinter
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Install PIP on Windows ?
Introduction To PYTHON
Python OOPs Concepts
Python Classes and Objects
Python | os.path.join() method
How to drop one or multiple columns in Pandas Dataframe
Python - Pytorch randn() method
sys.path in Python
Check if element exists in list in Python
Python - Pandas dataframe.append()
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n26 Mar, 2021"
},
{
"code": null,
"e": 329,
"s": 28,
"text": "In this article, we will see that how can we wrap the text in the TKinter Text-Box using the Tkinter module Called textWrap Module. The textwrap module can be used for wrapping and formatting plain text. This module provides formatting of the text by adjusting the line breaks in the input paragraph."
},
{
"code": null,
"e": 340,
"s": 329,
"text": "Example 1:"
},
{
"code": null,
"e": 691,
"s": 340,
"text": "Firstly We will import the Tkinter Library to the Code then we will declare it as a root for our Window then after declaring the size of the window we will call the function in the Tkinter called text() which will provide the Text box in that Window then declare the size of the Text Box then pack to the window or combined the textBox to the Window."
},
{
"code": null,
"e": 720,
"s": 691,
"text": "Below is the implementation:"
},
{
"code": null,
"e": 728,
"s": 720,
"text": "Python3"
},
{
"code": "# import tkinter module from tkinter import * # Create Objectroot = Tk() # Initialize tkinter window with dimensions 100x100 root.geometry('300x300') text=Text(root, width = 50, height = 50, padx = 10, pady = 10) # pack the text-Aera in the windowtext.pack() root.mainloop()",
"e": 1073,
"s": 728,
"text": null
},
{
"code": null,
"e": 1081,
"s": 1073,
"text": "Output:"
},
{
"code": null,
"e": 1092,
"s": 1081,
"text": "Example 2:"
},
{
"code": null,
"e": 1318,
"s": 1092,
"text": "Here We have the Problem like at the end of the box when we write something then it breaks the text to the new line. Generally to wrap the text we use the Text(wrap=word). Here is the basic idea about how we use the word=wrap"
},
{
"code": null,
"e": 1363,
"s": 1318,
"text": "Use the wrap=WORD option. Hereβs an example:"
},
{
"code": null,
"e": 1442,
"s": 1363,
"text": "from tkinter import *\nroot = Tk()\nt = Text(wrap=WORD)\nt.pack()\nroot.mainloop()"
},
{
"code": null,
"e": 1471,
"s": 1442,
"text": "Below is the implementation:"
},
{
"code": null,
"e": 1479,
"s": 1471,
"text": "Python3"
},
{
"code": "# import tkinter module from tkinter import * # Create Objectroot = Tk() # Initialize tkinter window with dimensions 100x100 root.geometry('300x300') text=Text(root, width = 50, height = 50, wrap = WORD, padx = 10, pady = 10) # pack the text-Aera in the windowtext.pack() root.mainloop()",
"e": 1810,
"s": 1479,
"text": null
},
{
"code": null,
"e": 1817,
"s": 1810,
"text": "Picked"
},
{
"code": null,
"e": 1832,
"s": 1817,
"text": "Python-tkinter"
},
{
"code": null,
"e": 1839,
"s": 1832,
"text": "Python"
},
{
"code": null,
"e": 1937,
"s": 1839,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 1969,
"s": 1937,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 1992,
"s": 1969,
"text": "Introduction To PYTHON"
},
{
"code": null,
"e": 2013,
"s": 1992,
"text": "Python OOPs Concepts"
},
{
"code": null,
"e": 2040,
"s": 2013,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 2071,
"s": 2040,
"text": "Python | os.path.join() method"
},
{
"code": null,
"e": 2127,
"s": 2071,
"text": "How to drop one or multiple columns in Pandas Dataframe"
},
{
"code": null,
"e": 2159,
"s": 2127,
"text": "Python - Pytorch randn() method"
},
{
"code": null,
"e": 2178,
"s": 2159,
"text": "sys.path in Python"
},
{
"code": null,
"e": 2220,
"s": 2178,
"text": "Check if element exists in list in Python"
}
] |
Python | Making program run faster
|
17 Sep, 2020
As we know, the Python programming language is a bit slow, and the target is to speed it up without the assistance of more extreme solutions, such as C extensions or a just-in-time (JIT) compiler. While the first rule of optimization might be to "not do it", the second rule is almost certainly "don't optimize the unimportant." To that end, if the program is running slow, one might start by profiling the code. More often than not, one finds that the program spends its time in a few hotspots, such as inner data processing loops. Once those locations are identified, these no-nonsense techniques can be used to make the program run faster. A lot of programmers start using Python as a language for writing simple scripts. When writing scripts, it is easy to fall into a practice of simply writing code with very little structure. Code #1: Taking this code into consideration.
Python3
# abc.pyimport sysimport csv with open(sys.argv[1]) as f: for row in csv.reader(f): # Some kind of processing
A little-known fact is that code defined in the global scope like this runs slower than code defined in a function. The speed difference has to do with the implementation of local versus global variables (operations involving locals are faster). So, simply put the scripting statements in a function to make the program run faster. Code #2 :
Python3
# abc.pyimport sysimport csv def main(filename): with open(filename) as f: for row in csv.reader(f): # Some kind of processing main(sys.argv[1])
The speed difference depends heavily on the processing being performed, but the speedups of 15-30% are not uncommon.
Every use of the dot (.) operator to access attributes comes with a cost. Under the covers, this triggers special methods, such as __getattribute__() and __getattr__(), which often lead to dictionary lookups. One can often avoid attribute lookups by using the "from module import name" form of import, as well as making selected use of bound methods, as shown in the code fragment given below — Code #3 :
Python3
import math def compute_roots(nums): result = [] for n in nums: result.append(math.sqrt(n)) return result # Testnums = range(1000000)for n in range(100): r = compute_roots(nums)
Output :
This program runs in about 40 seconds when running on the machine.
Code #4 : Change the compute_roots() function
Python3
from math import sqrt def compute_roots(nums): result = [] result_append = result.append for n in nums: result_append(sqrt(n)) return result
Output :
This program runs in about 29 seconds when running on the machine.
The only difference between the two versions of code is the elimination of attribute access. Instead of using math.sqrt(), the code uses sqrt(). The result.append() method is additionally placed into a local variable result_append and reused in the inner loop. However, it must be emphasized that these changes only make sense in frequently executed code, such as loops. So, this optimization really only makes sense in carefully selected places.
As previously noted, local variables are faster than global variables. For frequently accessed names, speedups can be obtained by making those names as local as possible.Code #5 : Modified version of the compute_roots() function
Python3
import math


def compute_roots(nums):
    """Return the square roots of the values in `nums` as a list."""
    sqrt = math.sqrt            # local alias: local lookups beat global ones
    roots = []
    append_root = roots.append  # cache the bound method as well
    for value in nums:
        append_root(sqrt(value))
    return roots
In this version, sqrt has been lifted from the math module and placed into a local variable. This code will run about 25 seconds (an improvement over the previous version, which took 29 seconds). That additional speedup is due to a local lookup of sqrt being a bit faster than a global lookup of sqrt. Locality arguments also apply when working in classes. In general, looking up a value such as self.name will be considerably slower than accessing a local variable. In inner loops, it might pay to lift commonly accessed attributes into a local variable as shown in the code given below. Code #6 :
Python3
# Slower: looks up self.value once per iteration
class SomeClass:
    ...

    def method(self):
        for x in s:
            op(self.value)


# Faster: the attribute is lifted into a local variable once,
# then the loop only pays for a cheap local lookup.
class SomeClass:
    ...

    def method(self):
        value = self.value
        for x in s:
            op(value)

# NOTE(review): `s` and `op` are undefined placeholders from the article;
# this snippet is illustrative and is not runnable as-is.
The reason Python is slow is because itβs dynamically typed now weβre going to talk about this more in detail but I want to give a comparison to a language like Java. Now in Java, everything is statically typed and this language is actually compiled before it runs, unlike Python thatβs compiled at runtime through an interpreter. Now what happens in Java is when you write code, you need to define what type each of your variables is going to be, what type your methods and functions are going to be returning and you pretty much have to define exactly what everythingβs going to be throughout your code. Now although this leads to much longer development times and takes a much longer time to write your code but what it does is increase efficiency when you are compiling, now the reason this actually works and the reason it works so much faster than Python code is because if you know the type that a specific variable or object is going to be, you can perform a ton of different optimizations and avoid performing a ton of different checks while youβre actually running the code because these checks are performed at compile time in Java essentially you canβt compile any Java code that hasnβt actual or even just like typed errors while youβre writing that code you are going to try to compile it and it would say like this type isnβt accurate, you canβt do this, you canβt compile it because it knows that when it comes to runtime thatβs not going to work so essentially all of these checks that actually needs to be performed in Python when the code is running are performed beforehand and thereβs just a ton of optimization done because of this statically typed length. Now one may ask a question like, Why doesnβt Python do this? 
Answer to this would be Python is dynamically typed which simply means that any variable can change its type and can change itβs value at any point in the program while itβs running which means that we canβt actually compile the entire program beforehand because we canβt do all of these checks at once because we donβt know what type these variables are going to be, they are going to change at runtime, different things are going to happen and because of that we canβt get all these optimization that we might have in a lower level language like Java, C or C++ and that is kind of the fundamental reason the language is slow, this dynamic typing and any fast language is going to have a compiler thatβs going to run through, itβs going to make sure that everything is good, itβs going to do all these checks before it actually ends up running the code at runtime where what happens in Python is all of your code is actually compiled and checked at runtime so rather than compiling it before and taking all that time beforehand while youβre running the code , many different checks are happening to make sure that say this object is correct, these types are proper, everything is working the same.
Now the next thing to talk about is obviously the lack of concurrency in Python. This is going to be the major kind of factor on speed, if youβre writing an application in Java, C, you can spread everything out throughout multiple threads which allows you to utilize all the cores of your CPU so to break this down in modern-day computing most of us have four core CPUs or higher and that allows us to actually run four tasks at the exact same time concurrently now with Python this isnβt possible. Python says, well for each interpreter we can have at most one thread running at a time and a thread is just some kind of operation thatβs happening on the CPU core so that means even if we create many threads in our Python program we can only be using one CPU core while in a Java program or a C program could be using all eight or be using all four which will obviously lead to 4X or 8X increase in speed, now we can get around this in Python by using multiprocessing, but there are some issues with that.
rgndunes
python-utility
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n17 Sep, 2020"
},
{
"code": null,
"e": 900,
"s": 28,
"text": "As we know, Python programming language is a bit slow and the target is to speed it up without the assistance of more extreme solutions, such as C extensions or a just-in-time (JIT) compiler.While the first rule of optimization might be to βnot do itβ, the second rule is almost certainly βdonβt optimize the unimportant.β To that end, if the program is running slow, one might start by profiling the code. More often than not, one finds that the program spends its time in a few hotspots, such as inner data processing loops. Once those locations are identified, the no-nonsense techniques can be used to make the program run faster. A lot of programmers start using Python as a language for writing simple scripts. When writing scripts, it is easy to fall into a practice of simply writing code with very little structure.Code #1: Taking this code into consideration. "
},
{
"code": null,
"e": 908,
"s": 900,
"text": "Python3"
},
{
"code": "# abc.pyimport sysimport csv with open(sys.argv[1]) as f: for row in csv.reader(f): # Some kind of processing",
"e": 1028,
"s": 908,
"text": null
},
{
"code": null,
"e": 1374,
"s": 1028,
"text": "A little-known fact is that code defined in the global scope like this runs slower than code defined in a function. The speed difference has to do with the implementation of local versus global variables (operations involving locals are faster). So, simply put the scripting statements in a function to make the program run faster. Code #2 : "
},
{
"code": null,
"e": 1382,
"s": 1374,
"text": "Python3"
},
{
"code": "# abc.pyimport sysimport csv def main(filename): with open(filename) as f: for row in csv.reader(f): # Some kind of processing main(sys.argv[1])",
"e": 1544,
"s": 1382,
"text": null
},
{
"code": null,
"e": 1662,
"s": 1544,
"text": "The speed difference depends heavily on the processing being performed, but the speedups of 15-30% are not uncommon. "
},
{
"code": null,
"e": 2064,
"s": 1662,
"text": "Every use of the dot (.) operator to access attributes comes with a cost. Under the covers, this triggers special methods, such as __getattribute__() and __getattr__(), which often lead to dictionary lookups.One can often avoid attribute lookups by using the from module import name form of import as well as making selected use of bound methods as shown in the code fragment given below β Code #3 : "
},
{
"code": null,
"e": 2072,
"s": 2064,
"text": "Python3"
},
{
"code": "import math def compute_roots(nums): result = [] for n in nums: result.append(math.sqrt(n)) return result # Testnums = range(1000000)for n in range(100): r = compute_roots(nums)",
"e": 2269,
"s": 2072,
"text": null
},
{
"code": null,
"e": 2280,
"s": 2269,
"text": "Output : "
},
{
"code": null,
"e": 2350,
"s": 2280,
"text": "This program runs in about 40 seconds when running on the machine.\n\n\n"
},
{
"code": null,
"e": 2398,
"s": 2350,
"text": "Code #4 : Change the compute_roots() function "
},
{
"code": null,
"e": 2406,
"s": 2398,
"text": "Python3"
},
{
"code": "from math import sqrt def compute_roots(nums): result = [] result_append = result.append for n in nums: result_append(sqrt(n)) return result",
"e": 2566,
"s": 2406,
"text": null
},
{
"code": null,
"e": 2577,
"s": 2566,
"text": "Output : "
},
{
"code": null,
"e": 2648,
"s": 2577,
"text": "This program runs in about 29 seconds when running on the machine.\n\n\n\n"
},
{
"code": null,
"e": 3097,
"s": 2648,
"text": "The only difference between the two versions of code is the elimination of attribute access. Instead of using math.sqrt(), the code uses sqrt(). The result.append() method is additionally placed into a local variable re sult_append and reused in the inner loop. However, it must be emphasized that these changes only make sense in frequently executed code, such as loops. So, this optimization really only makes sense in carefully selected places. "
},
{
"code": null,
"e": 3328,
"s": 3097,
"text": "As previously noted, local variables are faster than global variables. For frequently accessed names, speedups can be obtained by making those names as local as possible.Code #5 : Modified version of the compute_roots() function "
},
{
"code": null,
"e": 3336,
"s": 3328,
"text": "Python3"
},
{
"code": "import math def compute_roots(nums): sqrt = math.sqrt result = [] result_append = result.append for n in nums: result_append(sqrt(n)) return result",
"e": 3506,
"s": 3336,
"text": null
},
{
"code": null,
"e": 4107,
"s": 3506,
"text": "In this version, sqrt has been lifted from the math module and placed into a local variable. This code will run about 25 seconds (an improvement over the previous version, which took 29 seconds). That additional speedup is due to a local lookup of sqrt being a bit faster than a global lookup of sqrt. Locality arguments also apply when working in classes. In general, looking up a value such as self.name will be considerably slower than accessing a local variable. In inner loops, it might pay to lift commonly accessed attributes into a local variable as shown in the code given below. Code #6 : "
},
{
"code": null,
"e": 4115,
"s": 4107,
"text": "Python3"
},
{
"code": "# Slowerclass SomeClass: ... def method(self): for x in s: op(self.value)# Fasterclass SomeClass: ... def method(self): value = self.value for x in s: op(value)",
"e": 4331,
"s": 4115,
"text": null
},
{
"code": null,
"e": 7271,
"s": 4331,
"text": "The reason Python is slow is because itβs dynamically typed now weβre going to talk about this more in detail but I want to give a comparison to a language like Java. Now in Java, everything is statically typed and this language is actually compiled before it runs, unlike Python thatβs compiled at runtime through an interpreter. Now what happens in Java is when you write code, you need to define what type each of your variables is going to be, what type your methods and functions are going to be returning and you pretty much have to define exactly what everythingβs going to be throughout your code. Now although this leads to much longer development times and takes a much longer time to write your code but what it does is increase efficiency when you are compiling, now the reason this actually works and the reason it works so much faster than Python code is because if you know the type that a specific variable or object is going to be, you can perform a ton of different optimizations and avoid performing a ton of different checks while youβre actually running the code because these checks are performed at compile time in Java essentially you canβt compile any Java code that hasnβt actual or even just like typed errors while youβre writing that code you are going to try to compile it and it would say like this type isnβt accurate, you canβt do this, you canβt compile it because it knows that when it comes to runtime thatβs not going to work so essentially all of these checks that actually needs to be performed in Python when the code is running are performed beforehand and thereβs just a ton of optimization done because of this statically typed length. Now one may ask a question like, Why doesnβt Python do this? 
Answer to this would be Python is dynamically typed which simply means that any variable can change its type and can change itβs value at any point in the program while itβs running which means that we canβt actually compile the entire program beforehand because we canβt do all of these checks at once because we donβt know what type these variables are going to be, they are going to change at runtime, different things are going to happen and because of that we canβt get all these optimization that we might have in a lower level language like Java, C or C++ and that is kind of the fundamental reason the language is slow, this dynamic typing and any fast language is going to have a compiler thatβs going to run through, itβs going to make sure that everything is good, itβs going to do all these checks before it actually ends up running the code at runtime where what happens in Python is all of your code is actually compiled and checked at runtime so rather than compiling it before and taking all that time beforehand while youβre running the code , many different checks are happening to make sure that say this object is correct, these types are proper, everything is working the same. "
},
{
"code": null,
"e": 8278,
"s": 7271,
"text": "Now the next thing to talk about is obviously the lack of concurrency in Python. This is going to be the major kind of factor on speed, if youβre writing an application in Java, C, you can spread everything out throughout multiple threads which allows you to utilize all the cores of your CPU so to break this down in modern-day computing most of us have four core CPUs or higher and that allows us to actually run four tasks at the exact same time concurrently now with Python this isnβt possible. Python says, well for each interpreter we can have at most one thread running at a time and a thread is just some kind of operation thatβs happening on the CPU core so that means even if we create many threads in our Python program we can only be using one CPU core while in a Java program or a C program could be using all eight or be using all four which will obviously lead to 4X or 8X increase in speed, now we can get around this in Python by using multiprocessing, but there are some issues with that."
},
{
"code": null,
"e": 8287,
"s": 8278,
"text": "rgndunes"
},
{
"code": null,
"e": 8302,
"s": 8287,
"text": "python-utility"
},
{
"code": null,
"e": 8309,
"s": 8302,
"text": "Python"
}
] |
Socket Programming with Multi-threading in Python
|
14 Jul, 2022
Prerequisite : Socket Programming in Python, Multi-threading in PythonSocket Programming-> It helps us to connect a client to a server. Client is message sender and receiver and server is just a listener that works on data sent by client.What is a Thread? A thread is a light-weight process that does not require much memory overhead, they are cheaper than processes.What is Multi-threading Socket Programming? Multithreading is a process of executing multiple threads simultaneously in a single process.Multi-threading Modules : A _thread module & threading module is used for multi-threading in python, these modules help in synchronization and provide a lock to a thread in use.
from _thread import *
import threading
A lock object is created by->
print_lock = threading.Lock()
A lock has two states, βlockedβ or βunlockedβ. It has two basic methods acquire() and release(). When the state is unlocked print_lock.acquire() is used to change state to locked and print_lock.release() is used to change state to unlock.The function thread.start_new_thread() is used to start a new thread and return its identifier. The first argument is the function to call and its second argument is a tuple containing the positional list of arguments.Letβs study client-server multithreading socket programming by code- Note:-The code works with python3. Multi-threaded Server Code
Python3
# import socket programming library
import socket

# import thread module
from _thread import *
import threading

# Serialises the server's "Connected to" print with the client's lifetime.
print_lock = threading.Lock()


# thread function
def threaded(c):
    """Serve one client connection: echo each received message reversed.

    Runs in its own thread; exits (and closes the socket) when the
    client sends an empty payload.
    """
    while True:
        # data received from client
        data = c.recv(1024)
        if not data:
            print('Bye')

            # lock released on exit
            # NOTE(review): this lock is acquired in Main() and released
            # here in a different thread — confirm that is intended; it
            # works for threading.Lock in CPython but is unusual usage.
            print_lock.release()
            break

        # reverse the given string from client
        data = data[::-1]

        # send back reversed string to client
        c.send(data)

    # connection closed
    c.close()


def Main():
    """Bind to port 12345 and hand each accepted client to a new thread."""
    host = ""

    # reserve a port on your computer
    # in our case it is 12345 but it
    # can be anything
    port = 12345
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    print("socket binded to port", port)

    # put the socket into listening mode
    s.listen(5)
    print("socket is listening")

    # a forever loop until client wants to exit
    while True:
        # establish connection with client
        c, addr = s.accept()

        # lock acquired by client
        print_lock.acquire()
        print('Connected to :', addr[0], ':', addr[1])

        # Start a new thread and return its identifier
        start_new_thread(threaded, (c,))

    # NOTE(review): unreachable — the loop above never breaks.
    s.close()


if __name__ == '__main__':
    Main()
Console Window:
socket binded to port 12345
socket is listening
Connected to : 127.0.0.1 : 11600
Bye
Client Code
Python
# Import socket module
import socket


def Main():
    """Connect to the local reverse-echo server and exchange messages.

    Repeatedly sends a fixed message, prints the server's (reversed)
    reply, and asks the user whether to continue.
    """
    # local host IP '127.0.0.1'
    host = '127.0.0.1'

    # Define the port on which you want to connect
    port = 12345
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # connect to server on local computer
    s.connect((host, port))

    # message you send to server
    message = "shaurya says geeksforgeeks"
    while True:
        # message sent to server
        s.send(message.encode('ascii'))

        # message received from server
        data = s.recv(1024)

        # print the received message
        # here it would be a reverse of sent message
        print('Received from the server :', str(data.decode('ascii')))

        # ask the client whether he wants to continue
        ans = input('\nDo you want to continue(y/n) :')
        if ans == 'y':
            continue
        else:
            break

    # close the connection
    s.close()


if __name__ == '__main__':
    Main()
Console Window:
Received from the server : skeegrofskeeg syas ayruahs
Do you want to continue(y/n) :y
Received from the server : skeegrofskeeg syas ayruahs
Do you want to continue(y/n) :n
Process finished with exit code 0
Reference-> https://docs.python.org/2/library/thread.htmlThis article is contributed by SHAURYA UPPAL. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
shreyashagrawal
Akanksha_Rai
varshagumber28
stevewillson
Computer Networks
Python
Computer Networks
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 54,
"s": 26,
"text": "\n14 Jul, 2022"
},
{
"code": null,
"e": 759,
"s": 54,
"text": "Prerequisite : Socket Programming in Python, Multi-threading in PythonSocket Programming-> It helps us to connect a client to a server. Client is message sender and receiver and server is just a listener that works on data sent by client.What is a Thread? A thread is a light-weight process that does not require much memory overhead, they are cheaper than processes.What is Multi-threading Socket Programming? port on your computerMultithreading is a process of executing multiple threads simultaneously in a single process.Multi-threading Modules : A _thread module & threading module is used for multi-threading in python, these modules help in synchronization and provide a lock to a thread in use. "
},
{
"code": null,
"e": 798,
"s": 759,
"text": "from _thread import *\nimport threading"
},
{
"code": null,
"e": 830,
"s": 798,
"text": "A lock object is created by-> "
},
{
"code": null,
"e": 860,
"s": 830,
"text": "print_lock = threading.Lock()"
},
{
"code": null,
"e": 1449,
"s": 860,
"text": "A lock has two states, βlockedβ or βunlockedβ. It has two basic methods acquire() and release(). When the state is unlocked print_lock.acquire() is used to change state to locked and print_lock.release() is used to change state to unlock.The function thread.start_new_thread() is used to start a new thread and return its identifier. The first argument is the function to call and its second argument is a tuple containing the positional list of arguments.Letβs study client-server multithreading socket programming by code- Note:-The code works with python3. Multi-threaded Server Code "
},
{
"code": null,
"e": 1457,
"s": 1449,
"text": "Python3"
},
{
"code": "# import socket programming libraryimport socket # import thread modulefrom _thread import *import threading print_lock = threading.Lock() # thread functiondef threaded(c): while True: # data received from client data = c.recv(1024) if not data: print('Bye') # lock released on exit print_lock.release() break # reverse the given string from client data = data[::-1] # send back reversed string to client c.send(data) # connection closed c.close() def Main(): host = \"\" # reserve a port on your computer # in our case it is 12345 but it # can be anything port = 12345 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((host, port)) print(\"socket binded to port\", port) # put the socket into listening mode s.listen(5) print(\"socket is listening\") # a forever loop until client wants to exit while True: # establish connection with client c, addr = s.accept() # lock acquired by client print_lock.acquire() print('Connected to :', addr[0], ':', addr[1]) # Start a new thread and return its identifier start_new_thread(threaded, (c,)) s.close() if __name__ == '__main__': Main()",
"e": 2766,
"s": 1457,
"text": null
},
{
"code": null,
"e": 2867,
"s": 2766,
"text": "Console Window:\nsocket binded to port 12345\nsocket is listening\nConnected to : 127.0.0.1 : 11600\nBye"
},
{
"code": null,
"e": 2881,
"s": 2867,
"text": "Client Code "
},
{
"code": null,
"e": 2888,
"s": 2881,
"text": "Python"
},
{
"code": "# Import socket moduleimport socket def Main(): # local host IP '127.0.0.1' host = '127.0.0.1' # Define the port on which you want to connect port = 12345 s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) # connect to server on local computer s.connect((host,port)) # message you send to server message = \"shaurya says geeksforgeeks\" while True: # message sent to server s.send(message.encode('ascii')) # message received from server data = s.recv(1024) # print the received message # here it would be a reverse of sent message print('Received from the server :',str(data.decode('ascii'))) # ask the client whether he wants to continue ans = input('\\nDo you want to continue(y/n) :') if ans == 'y': continue else: break # close the connection s.close() if __name__ == '__main__': Main()",
"e": 3825,
"s": 2888,
"text": null
},
{
"code": null,
"e": 4050,
"s": 3825,
"text": "Console Window:\nReceived from the server : skeegrofskeeg syas ayruahs\n\nDo you want to continue(y/n) :y\nReceived from the server : skeegrofskeeg syas ayruahs\n\nDo you want to continue(y/n) :n\n\nProcess finished with exit code 0"
},
{
"code": null,
"e": 4529,
"s": 4050,
"text": "Reference-> https://docs.python.org/2/library/thread.htmlThis article is contributed by SHAURYA UPPAL. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. "
},
{
"code": null,
"e": 4545,
"s": 4529,
"text": "shreyashagrawal"
},
{
"code": null,
"e": 4558,
"s": 4545,
"text": "Akanksha_Rai"
},
{
"code": null,
"e": 4573,
"s": 4558,
"text": "varshagumber28"
},
{
"code": null,
"e": 4586,
"s": 4573,
"text": "stevewillson"
},
{
"code": null,
"e": 4604,
"s": 4586,
"text": "Computer Networks"
},
{
"code": null,
"e": 4611,
"s": 4604,
"text": "Python"
},
{
"code": null,
"e": 4629,
"s": 4611,
"text": "Computer Networks"
}
] |
Python β Check List elements from Dictionary List
|
10 May, 2020
Sometimes, while working with data, we can have a problem in which we need to check for list element presence as a particular key in list of records. This kind of problem can occur in domains in which data are involved like web development and Machine Learning. Lets discuss certain ways in which this task can be solved.
Input : test_list = [{βPriceβ: 20, βColorβ: βOrangeβ}, {βPriceβ: 25, βColorβ: βYellowβ}]Output : [True, False, True, False]
Input : test_list = [{βColorβ: βPinkβ, βPriceβ: 50}]Output : [False, False, False, False]
Method #1 : Using loopThis is brute way to solve this problem. In this, we iterate will all the dictionaries for each value from list and compare with the desired key and return True for records that possess it.
# Python3 code to demonstrate working of
# Check List elements from Dictionary List
# Using loop


# helper function
def check_ele(ele, test_list):
    """Return True if `ele` occurs as a value in any dict of `test_list`."""
    for sub in test_list:
        for item in sub.values():
            if ele == item:
                return True
    return False


# initializing list
test_list = [{'Name': 'Apple', 'Price': 18, 'Color': 'Red'},
             {'Name': 'Mango', 'Price': 20, 'Color': 'Yellow'},
             {'Name': 'Orange', 'Price': 24, 'Color': 'Orange'},
             {'Name': 'Plum', 'Price': 28, 'Color': 'Red'}]

# printing original list
print("The original list is : " + str(test_list))

# initializing Values list
val_list = ['Yellow', 'Red', 'Orange', 'Green']

# Check List elements from Dictionary List
# Using loop
res = []
for ele in val_list:
    res.append(check_ele(ele, test_list))

# printing result
print("The Association list in Order : " + str(res))
The original list is : [{βNameβ: βAppleβ, βColorβ: βRedβ, βPriceβ: 18}, {βNameβ: βMangoβ, βColorβ: βYellowβ, βPriceβ: 20}, {βNameβ: βOrangeβ, βColorβ: βOrangeβ, βPriceβ: 24}, {βNameβ: βPlumβ, βColorβ: βRedβ, βPriceβ: 28}]
The Association list in Order : [True, True, True, False]
Method #2 : Using any() + generator expressionThe use of any() with integration with generator expression can solve this problem. In this we reduce the lines of code by reducing inner loop, by testing using any().
# Python3 code to demonstrate working of
# Check List elements from Dictionary List
# Using any() + generator expression

# initializing list
test_list = [{'Name': 'Apple', 'Price': 18, 'Color': 'Red'},
             {'Name': 'Mango', 'Price': 20, 'Color': 'Yellow'},
             {'Name': 'Orange', 'Price': 24, 'Color': 'Orange'},
             {'Name': 'Plum', 'Price': 28, 'Color': 'Red'}]

# printing original list
print("The original list is : " + str(test_list))

# initializing Values list
val_list = ['Yellow', 'Red', 'Orange', 'Green']

# initializing Key
key = 'Color'

# Check List elements from Dictionary List
# Using any() + generator expression: any() short-circuits as soon as a
# record with the matching key value is found
res = [any(clr == sub[key] for sub in test_list) for clr in val_list]

# printing result
print("The Association list in Order : " + str(res))
The original list is : [{βNameβ: βAppleβ, βColorβ: βRedβ, βPriceβ: 18}, {βNameβ: βMangoβ, βColorβ: βYellowβ, βPriceβ: 20}, {βNameβ: βOrangeβ, βColorβ: βOrangeβ, βPriceβ: 24}, {βNameβ: βPlumβ, βColorβ: βRedβ, βPriceβ: 28}]
The Association list in Order : [True, True, True, False]
Python dictionary-programs
Python
Python Programs
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n10 May, 2020"
},
{
"code": null,
"e": 350,
"s": 28,
"text": "Sometimes, while working with data, we can have a problem in which we need to check for list element presence as a particular key in list of records. This kind of problem can occur in domains in which data are involved like web development and Machine Learning. Lets discuss certain ways in which this task can be solved."
},
{
"code": null,
"e": 474,
"s": 350,
"text": "Input : test_list = [{βPriceβ: 20, βColorβ: βOrangeβ}, {βPriceβ: 25, βColorβ: βYellowβ}]Output : [True, False, True, False]"
},
{
"code": null,
"e": 564,
"s": 474,
"text": "Input : test_list = [{βColorβ: βPinkβ, βPriceβ: 50}]Output : [False, False, False, False]"
},
{
"code": null,
"e": 776,
"s": 564,
"text": "Method #1 : Using loopThis is brute way to solve this problem. In this, we iterate will all the dictionaries for each value from list and compare with the desired key and return True for records that possess it."
},
{
"code": "# Python3 code to demonstrate working of # Check List elements from Dictionary List# Using loop # helpr_funcdef check_ele(ele, test_list): for sub in test_list: for item in sub.values(): if ele == item: return True return False # initializing listtest_list = [{'Name' : 'Apple', 'Price' : 18, 'Color' : 'Red'}, {'Name' : 'Mango', 'Price' : 20, 'Color' : 'Yellow'}, {'Name' : 'Orange', 'Price' : 24, 'Color' : 'Orange'}, {'Name' : 'Plum', 'Price' : 28, 'Color' : 'Red'}] # printing original listprint(\"The original list is : \" + str(test_list)) # initializing Values list val_list = ['Yellow', 'Red', 'Orange', 'Green'] # Check List elements from Dictionary List# Using loopres = []for ele in val_list: res.append(check_ele(ele, test_list)) # printing result print(\"The Association list in Order : \" + str(res)) ",
"e": 1674,
"s": 776,
"text": null
},
{
"code": null,
"e": 1896,
"s": 1674,
"text": "The original list is : [{βNameβ: βAppleβ, βColorβ: βRedβ, βPriceβ: 18}, {βNameβ: βMangoβ, βColorβ: βYellowβ, βPriceβ: 20}, {βNameβ: βOrangeβ, βColorβ: βOrangeβ, βPriceβ: 24}, {βNameβ: βPlumβ, βColorβ: βRedβ, βPriceβ: 28}]"
},
{
"code": null,
"e": 1954,
"s": 1896,
"text": "The Association list in Order : [True, True, True, False]"
},
{
"code": null,
"e": 2170,
"s": 1956,
"text": "Method #2 : Using any() + generator expressionThe use of any() with integration with generator expression can solve this problem. In this we reduce the lines of code by reducing inner loop, by testing using any()."
},
{
"code": "# Python3 code to demonstrate working of # Check List elements from Dictionary List# Using any() + generator expression # initializing listtest_list = [{'Name' : 'Apple', 'Price' : 18, 'Color' : 'Red'}, {'Name' : 'Mango', 'Price' : 20, 'Color' : 'Yellow'}, {'Name' : 'Orange', 'Price' : 24, 'Color' : 'Orange'}, {'Name' : 'Plum', 'Price' : 28, 'Color' : 'Red'}] # printing original listprint(\"The original list is : \" + str(test_list)) # initializing Values list val_list = ['Yellow', 'Red', 'Orange', 'Green'] # initializing Key key = 'Color' # Check List elements from Dictionary List# Using loopres = [any(clr == sub[key] for sub in test_list) for clr in val_list] # printing result print(\"The Association list in Order : \" + str(res)) ",
"e": 2952,
"s": 2170,
"text": null
},
{
"code": null,
"e": 3174,
"s": 2952,
"text": "The original list is : [{βNameβ: βAppleβ, βColorβ: βRedβ, βPriceβ: 18}, {βNameβ: βMangoβ, βColorβ: βYellowβ, βPriceβ: 20}, {βNameβ: βOrangeβ, βColorβ: βOrangeβ, βPriceβ: 24}, {βNameβ: βPlumβ, βColorβ: βRedβ, βPriceβ: 28}]"
},
{
"code": null,
"e": 3232,
"s": 3174,
"text": "The Association list in Order : [True, True, True, False]"
},
{
"code": null,
"e": 3259,
"s": 3232,
"text": "Python dictionary-programs"
},
{
"code": null,
"e": 3266,
"s": 3259,
"text": "Python"
},
{
"code": null,
"e": 3282,
"s": 3266,
"text": "Python Programs"
}
] |
Python | Crop image using pillow
|
16 Oct, 2019
In this article, we will learn to crop an image using pillow library. Cropping an image means to select a rectangular region inside an image and removing everything outside the rectangle. To crop an image we make use of crop() method on image objects.
Syntax : IMG.crop(box_tuple)
Parameters :Image_path- Location of the imageIMG- Image to cropbox_tuple- [left, up, right, bottom] of the image to crop
Returns : An Image object which represents the cropped image.
Example 1:
# import Image modulefrom PIL import Image # open the imageImage1 = Image.open('D:/cat.jpg') # crop the imagecroppedIm = Image1.crop((130, 120, 200, 200)) # show the imagecroppedIm.show()
Input Image :
Output :
Example 2:
# import Image modulefrom PIL import Image # open the imageImage1 = Image.open('D:/cat.jpg') # crop the imagecroppedIm = Image1.crop((130, 50, 250, 150)) # show the imagecroppedIm.show()
Input Image :
Output :
nidhi_biet
Python-pil
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Python Dictionary
Enumerate() in Python
Different ways to create Pandas Dataframe
Read a file line by line in Python
How to Install PIP on Windows ?
Python String | replace()
Python OOPs Concepts
Python Classes and Objects
*args and **kwargs in Python
Introduction To PYTHON
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n16 Oct, 2019"
},
{
"code": null,
"e": 280,
"s": 28,
"text": "In this article, we will learn to crop an image using pillow library. Cropping an image means to select a rectangular region inside an image and removing everything outside the rectangle. To crop an image we make use of crop() method on image objects."
},
{
"code": null,
"e": 309,
"s": 280,
"text": "Syntax : IMG.crop(box_tuple)"
},
{
"code": null,
"e": 430,
"s": 309,
"text": "Parameters :Image_path- Location of the imageIMG- Image to cropbox_tuple- [left, up, right, bottom] of the image to crop"
},
{
"code": null,
"e": 492,
"s": 430,
"text": "Returns : An Image object which represents the cropped image."
},
{
"code": null,
"e": 503,
"s": 492,
"text": "Example 1:"
},
{
"code": "# import Image modulefrom PIL import Image # open the imageImage1 = Image.open('D:/cat.jpg') # crop the imagecroppedIm = Image1.crop((130, 120, 200, 200)) # show the imagecroppedIm.show()",
"e": 694,
"s": 503,
"text": null
},
{
"code": null,
"e": 708,
"s": 694,
"text": "Input Image :"
},
{
"code": null,
"e": 717,
"s": 708,
"text": "Output :"
},
{
"code": null,
"e": 728,
"s": 717,
"text": "Example 2:"
},
{
"code": "# import Image modulefrom PIL import Image # open the imageImage1 = Image.open('D:/cat.jpg') # crop the imagecroppedIm = Image1.crop((130, 50, 250, 150)) # show the imagecroppedIm.show()",
"e": 918,
"s": 728,
"text": null
},
{
"code": null,
"e": 932,
"s": 918,
"text": "Input Image :"
},
{
"code": null,
"e": 941,
"s": 932,
"text": "Output :"
},
{
"code": null,
"e": 952,
"s": 941,
"text": "nidhi_biet"
},
{
"code": null,
"e": 963,
"s": 952,
"text": "Python-pil"
},
{
"code": null,
"e": 970,
"s": 963,
"text": "Python"
},
{
"code": null,
"e": 1068,
"s": 970,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 1086,
"s": 1068,
"text": "Python Dictionary"
},
{
"code": null,
"e": 1108,
"s": 1086,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 1150,
"s": 1108,
"text": "Different ways to create Pandas Dataframe"
},
{
"code": null,
"e": 1185,
"s": 1150,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 1217,
"s": 1185,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 1243,
"s": 1217,
"text": "Python String | replace()"
},
{
"code": null,
"e": 1264,
"s": 1243,
"text": "Python OOPs Concepts"
},
{
"code": null,
"e": 1291,
"s": 1264,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 1320,
"s": 1291,
"text": "*args and **kwargs in Python"
}
] |
Facts about Cython Programming Language
|
20 Aug, 2020
Cython is a programming language. It can run on Windows, macOS, and Linux operating systems. It supports Python versions ranging from 2.6 to 3.8. Cython 3.0.0 is under development. In Cython, the code written in Python is converted to C language. High-traffic websites such as Quora use the Cython programming language.
Cython is actually derived from the Pyrex language. It is more advanced and has more features and optimizations than the Pyrex language. Cython was separated from the Pyrex development in the year 2007 because its developers envisioned a wider scope of the language than Pyrex. It was a part of a project called Sage. Cython programming language has a .pyx extension. Scientific users of Python use the Cython programming language a lot. It was created by Guido van Rossum and developed by Robert Bradshaw and Stefan Behnel. It was initially released on the 28th of July, 2007. It had its stable release on 24th March 2020.
Cython is aimed at being the superset of the Python programming language. It is so designed that it gives C like performance along with codes mostly written in the Python language allowing extra syntax that is inspired by C. When Cython is compiled it gives CPython extension modules. It provides lesser computational overhead than Python at run-time. C and C++ codes can be wrapped into the Cython modules. The Cython is dependent on the Python interpreter and standard library. Cython employs optimistic optimizations, optional type inference, low control structures overheads, and low function call overhead. Its performance is dependent on the generation and implementation of the C codes. The Cython programming Language is much like Python with very little difference. To understand this, let us take, for example, Python code and its relevant Cython code. Python code:
Python3
def f(x): return x**2-x def integrate_f(a, b, N): s = 0 dx = (b-a)/N for i in range(N): s += f(a+i*dx) return s * dx
Cython code:
Python3
cdef double f(double x): return x**2-xdef integrate_f(double a, double b, int N): cdef int i cdef double s, x, dx s = 0 dx = (b-a)/N for i in range(N): s += f(a+i*dx) return s * dx
In the two codes, it can be seen that very little has been changed. Only the variables have been explicitly declared and it affects the performance thereby improving its speed.
The Cython programming language is used to speed the written codes. Cython language allows easy working with the C libraries. Cython also supports C++. Cython allows easy interaction with the Python Libraries without Python in the way. Cython Libraries have the same garbage collection as that of Python. It is also possible to manage the C-level Structures using malloc/free. Cython automatically checks for runtime problems that arise in C. The C-code generated by Cython is very safe. If error checks are not required at runtime, they can even be disabled. Cython also uses the Global Interpreter Lock of Python. It is used for countering the problem of resource contention. Cython can be used in Python application and software modules that need extra protection from attacks such as snooping.
When Cython encounters the Python codes its complete conversion to C language is not possible which results in several calls to the Python interpreter. This might give a little speedup (15-20%) or in some cases either have no effect or degradation of performance. Cython code is best in its performance when it is written only in the C language. Cython provides a source code report that illustrates which parts of it are written in Python to avoid performance bottlenecks.
Cython has the ability to improve the usage of the third-party number-crunching libraries like NumPy that are based on C. It uses NumPy to counter Python bottleneck problems by taking them outside the loop. Fast access to arrays of NumPy is provided by Cython. The syntax in the Cython written for NumPy is similar to the syntax that is used in Python. For faster bindings of Cython and NumPy, customization of the Cython code is needed. This includes the use of the statement "cimport". This statement is used by the Cython programming language to see the C-level constructs at the time of program compilation.
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n20 Aug, 2020"
},
{
"code": null,
"e": 334,
"s": 28,
"text": "Cython is a programming language. It can run on Windows, macOS, and Linux operating systems. It had a version ranging from 2.6 to 3.8. Cython 3.0.0 is under development. In Cython, the Code written in Python is converted to C language. High traffic websites such as Quora use Cython Programming language. "
},
{
"code": null,
"e": 959,
"s": 334,
"text": "Cython is actually derived from the Pyrex language. It is more advanced and has more features and optimizations than the Pyrex language. Cython was separated from the Pyrex development in the year 2007 because its developers envisioned a wider scope of the language than Pyrex. It was a part of a project called Sage. Cython programming language has a .pyx extension. Scientific users of Python use the Cython programming language a lot. It was created by Guido van Rossum and developed by Robert Bradshaw and Stefan Behnel. It was initially released on the 28th of July, 2007. It had its stable release on 24th March 2020. "
},
{
"code": null,
"e": 1836,
"s": 959,
"text": "Cython is aimed at being the superset of the Python programming language. It is so designed that it gives C like performance along with codes mostly written in the Python language allowing extra syntax that is inspired by C. When Cython is compiled it gives CPython extension modules. It provides lesser computational overhead than Python at run-time. C and C++ codes can be wrapped into the Cython modules. The Cython is dependent on the Python interpreter and standard library. Cython employs optimistic optimizations, optional type inference, low control structures overheads, and low function call overhead. Its performance is dependent on the generation and implementation of the C codes. The Cython programming Language is much like Python with very little difference. To understand this, let us take, for example, Python code and its relevant Cython code. Python code: "
},
{
"code": null,
"e": 1844,
"s": 1836,
"text": "Python3"
},
{
"code": "def f(x): return x**2-x def integrate_f(a, b, N): s = 0 dx = (b-a)/N for i in range(N): s += f(a+i*dx) return s * dx",
"e": 1984,
"s": 1844,
"text": null
},
{
"code": null,
"e": 1998,
"s": 1984,
"text": "Cython code: "
},
{
"code": null,
"e": 2006,
"s": 1998,
"text": "Python3"
},
{
"code": "cdef double f(double x): return x**2-xdef integrate_f(double a, double b, int N): cdef int i cdef double s, x, dx s = 0 dx = (b-a)/N for i in range(N): s += f(a+i*dx) return s * dx",
"e": 2215,
"s": 2006,
"text": null
},
{
"code": null,
"e": 2392,
"s": 2215,
"text": "In the two codes, it can be seen that very little has been changed. Only the variables have been explicitly declared and it affects the performance thereby improving its speed."
},
{
"code": null,
"e": 3191,
"s": 2392,
"text": "The Cython programming language is used to speed the written codes. Cython language allows easy working with the C libraries. Cython also supports C++. Cython allows easy interaction with the Python Libraries without Python in the way. Cython Libraries have the same garbage collection as that of Python. It is also possible to manage the C-level Structures using malloc/free. Cython automatically checks for runtime problems that arise in C. The C-code generated by Cython is very safe. If error checks are not required at runtime, they can even be disabled. Cython also uses the Global Interpreter Lock of Python. It is used for countering the problem of resource contention. Cython can be used in Python application and software modules that need extra protection from attacks such as snooping. "
},
{
"code": null,
"e": 3665,
"s": 3191,
"text": "When Cython encounters the Python codes its complete conversion to C language is not possible which results in several calls to the Python interpreter. This might give a little speedup (15-20%) or in some cases either have no effect or degradation of performance. Cython code is best in its performance when it is written only in the C language. Cython provides a source code report that illustrates which parts of it are written in Python to avoid performance bottlenecks."
},
{
"code": null,
"e": 4272,
"s": 3665,
"text": "Cython has the ability to improve the usage of the third-party number-crunching libraries like NumPy that are based on C. It uses NumPy to counter Python bottleneck problems by taking them outside the loop. Fast access to arrays of Numpy is provided by Cython. The syntax in the Cython written for Numpy is similar to the syntax that is used in Python. For faster bindings of the Cython and Numpy, the custom of Cython is needed. This includes the use of the statement, βcimportβ. This statement is used by the Cython programming language to see the C- level constructs at the time of program compilation. "
},
{
"code": null,
"e": 4279,
"s": 4272,
"text": "Python"
}
] |
Write an Efficient C Program to Reverse Bits of a Number
|
27 May, 2022
Given an unsigned integer, reverse all bits of it and return the number with reversed bits.
Input : n = 1, Output : 2147483648. Explanation : On a machine with size of unsigned bit as 32, the reverse of 0....001 is 100....0.
Input : n = 2147483648, Output : 1
Method1 β Simple: Loop through all the bits of an integer. If a bit at ith position is set in the i/p no. then set the bit at (NO_OF_BITS β 1) β i in o/p. Where NO_OF_BITS is number of bits present in the given number.
Below is the implementation of the above approach:
c
// C code to implement the approach#include <stdio.h> // Function to reverse bits of numunsigned int reverseBits(unsigned int num){ unsigned int NO_OF_BITS = sizeof(num) * 8; unsigned int reverse_num = 0; int i; for (i = 0; i < NO_OF_BITS; i++) { if ((num & (1 << i))) reverse_num |= 1 << ((NO_OF_BITS - 1) - i); } return reverse_num;} // Driver codeint main(){ unsigned int x = 2; printf("%u", reverseBits(x)); getchar();}
1073741824
Time Complexity: O(Log n). Time complexity would be Log(num) as there are log(num) bits in a binary number βnumβ and weβre looping through all bits.Auxiliary space: O(1)
Method 2 β Standard: The idea is to keep putting set bits of the num in reverse_num until num becomes zero. After num becomes zero, shift the remaining bits of reverse_num. Let num is stored using 8 bits and num be 00000110. After the loop you will get reverse_num as 00000011. Now you need to left shift reverse_num 5 more times and you get the exact reverse 01100000.
Below is the implementation of the above approach:
c
// C code to implement the approach#include <stdio.h> // Function to reverse bits of numunsigned int reverseBits(unsigned int num){ unsigned int count = sizeof(num) * 8 - 1; unsigned int reverse_num = num; num >>= 1; while (num) { reverse_num <<= 1; reverse_num |= num & 1; num >>= 1; count--; } reverse_num <<= count; return reverse_num;} // Driver's codeint main(){ unsigned int x = 1; printf("%u", reverseBits(x)); getchar();}
2147483648
Time Complexity: O(logn) where n is the given numberAuxiliary space: O(1)
Method 3 β Lookup Table: We can reverse the bits of a number in O(1) if we know the size of the number. We can implement it using look up table. Please refer Reverse bits using lookup table in O(1) time for details.
Source : https://graphics.stanford.edu/~seander/bithacks.html
mohity
parthdhake
avtarkumar719
chandramauliguptach
Amazon
HCL
Nvidia
Qualcomm
Bit Magic
Mathematical
Amazon
Qualcomm
HCL
Nvidia
Mathematical
Bit Magic
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 52,
"s": 24,
"text": "\n27 May, 2022"
},
{
"code": null,
"e": 144,
"s": 52,
"text": "Given an unsigned integer, reverse all bits of it and return the number with reversed bits."
},
{
"code": null,
"e": 271,
"s": 144,
"text": "Input : n = 1Output : 2147483648 Explanation : On a machine with size of unsigned bit as 32. Reverse of 0....001 is 100....0."
},
{
"code": null,
"e": 332,
"s": 271,
"text": "Input : n = 2147483648Output : 1 "
},
{
"code": null,
"e": 553,
"s": 332,
"text": "Method1 β Simple: Loop through all the bits of an integer. If a bit at ith position is set in the i/p no. then set the bit at (NO_OF_BITS β 1) β i in o/p. Where NO_OF_BITS is number of bits present in the given number. "
},
{
"code": null,
"e": 604,
"s": 553,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 606,
"s": 604,
"text": "c"
},
{
"code": "// C code to implement the approach#include <stdio.h> // Function to reverse bits of numunsigned int reverseBits(unsigned int num){ unsigned int NO_OF_BITS = sizeof(num) * 8; unsigned int reverse_num = 0; int i; for (i = 0; i < NO_OF_BITS; i++) { if ((num & (1 << i))) reverse_num |= 1 << ((NO_OF_BITS - 1) - i); } return reverse_num;} // Driver codeint main(){ unsigned int x = 2; printf(\"%u\", reverseBits(x)); getchar();}",
"e": 1075,
"s": 606,
"text": null
},
{
"code": null,
"e": 1086,
"s": 1075,
"text": "1073741824"
},
{
"code": null,
"e": 1256,
"s": 1086,
"text": "Time Complexity: O(Log n). Time complexity would be Log(num) as there are log(num) bits in a binary number βnumβ and weβre looping through all bits.Auxiliary space: O(1)"
},
{
"code": null,
"e": 1627,
"s": 1256,
"text": "Method 2 β Standard: The idea is to keep putting set bits of the num in reverse_num until num becomes zero. After num becomes zero, shift the remaining bits of reverse_num. Let num is stored using 8 bits and num be 00000110. After the loop you will get reverse_num as 00000011. Now you need to left shift reverse_num 5 more times and you get the exact reverse 01100000. "
},
{
"code": null,
"e": 1678,
"s": 1627,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 1680,
"s": 1678,
"text": "c"
},
{
"code": "// C code to implement the approach#include <stdio.h> // Function to reverse bits of numunsigned int reverseBits(unsigned int num){ unsigned int count = sizeof(num) * 8 - 1; unsigned int reverse_num = num; num >>= 1; while (num) { reverse_num <<= 1; reverse_num |= num & 1; num >>= 1; count--; } reverse_num <<= count; return reverse_num;} // Driver's codeint main(){ unsigned int x = 1; printf(\"%u\", reverseBits(x)); getchar();}",
"e": 2169,
"s": 1680,
"text": null
},
{
"code": null,
"e": 2180,
"s": 2169,
"text": "2147483648"
},
{
"code": null,
"e": 2254,
"s": 2180,
"text": "Time Complexity: O(logn) where n is the given numberAuxiliary space: O(1)"
},
{
"code": null,
"e": 2472,
"s": 2254,
"text": " Method 3 β Lookup Table: We can reverse the bits of a number in O(1) if we know the size of the number. We can implement it using look up table. Please refer Reverse bits using lookup table in O(1) time for details. "
},
{
"code": null,
"e": 2534,
"s": 2472,
"text": "Source : https://graphics.stanford.edu/~seander/bithacks.html"
},
{
"code": null,
"e": 2541,
"s": 2534,
"text": "mohity"
},
{
"code": null,
"e": 2552,
"s": 2541,
"text": "parthdhake"
},
{
"code": null,
"e": 2566,
"s": 2552,
"text": "avtarkumar719"
},
{
"code": null,
"e": 2586,
"s": 2566,
"text": "chandramauliguptach"
},
{
"code": null,
"e": 2593,
"s": 2586,
"text": "Amazon"
},
{
"code": null,
"e": 2597,
"s": 2593,
"text": "HCL"
},
{
"code": null,
"e": 2604,
"s": 2597,
"text": "Nvidia"
},
{
"code": null,
"e": 2613,
"s": 2604,
"text": "Qualcomm"
},
{
"code": null,
"e": 2623,
"s": 2613,
"text": "Bit Magic"
},
{
"code": null,
"e": 2636,
"s": 2623,
"text": "Mathematical"
},
{
"code": null,
"e": 2643,
"s": 2636,
"text": "Amazon"
},
{
"code": null,
"e": 2652,
"s": 2643,
"text": "Qualcomm"
},
{
"code": null,
"e": 2656,
"s": 2652,
"text": "HCL"
},
{
"code": null,
"e": 2663,
"s": 2656,
"text": "Nvidia"
},
{
"code": null,
"e": 2676,
"s": 2663,
"text": "Mathematical"
},
{
"code": null,
"e": 2686,
"s": 2676,
"text": "Bit Magic"
}
] |
Highlight the negative values red and positive values black in Pandas Dataframe
|
20 Aug, 2020
Let's see various methods to highlight the positive values black and negative values red in a Pandas Dataframe. First, let's make a Dataframe:
Python3
# Import Required Librariesimport pandas as pdimport numpy as np # Create a dictionary for the dataframedict = { 'Name': ['Sukritin', 'Sumit Tyagi', 'Akriti Goel', 'Sanskriti', 'Abhishek Jain'], 'Age': [22, 20, 45, 21, 22], 'Marks': [90, 84, -33, -87, 82]} # Converting Dictionary to# Pandas Dataframedf = pd.DataFrame(dict) # Print Dataframeprint(df)
Output:
Now, come to the highlighting part. Our objective is to highlight negative values red and positive values black.
Method 1: Using Dataframe.style.apply().
Syntax: DataFrame.style.apply(self, func, axis=0, subset=None, **kwargs)Parameters:
func: It should take a pandas.Series or pandas.DataFrame based on the axis and should return an object with the same shape.
axis: {0 or βindexβ, 1 or βcolumnsβ, None}, default 0. Apply to each column (axis=0 or βindexβ), to each row (axis=1 or βcolumnsβ), or to the entire DataFrame at once with axis=None.
subset: Set of columns or rows on which you want to call the func.
**kwargs: Pass along to func.
Returns: Styler object.
Example 1: Highlighting text.
Python3
# Define a function for colouring # negative values red and # positive values blackdef highlight_max(s): if s.dtype == np.object: is_neg = [False for _ in range(s.shape[0])] else: is_neg = s < 0 return ['color: red;' if cell else 'color:black' for cell in is_neg] # Using apply method of style # attribute of Pandas DataFramedf.style.apply(highlight_max)
Output:
Example 2: Highlighting cells instead of text.
Python3
# Define a function which # returns the list for # df.style.apply() methoddef highlight_max(s): if s.dtype == np.object: is_neg = [False for _ in range(s.shape[0])] else: is_neg = s < 0 return ['background: red; color:white' if cell else 'background:black; color:white' for cell in is_neg] # Using apply method of style # attribute of Pandas DataFramedf.style.apply(highlight_max)
Output:
Method 2: Using dataframe.style.applymap() method.
Syntax: DataFrame.style.applymap(self, func, subset=None, **kwargs)Parameters:
func: It takes a scalar value and return the scalar values
subset: Set of columns or rows on which you want to call the func.
**kwargs: Pass along to func.
Returns: Styler object.
Example 1: Highlighting text.
Python3
# Define a function for # colouring negative values # red and positive values blackdef highlight_max(cell): if type(cell) != str and cell < 0 : return 'color: red' else: return 'color: black' df.style.applymap(highlight_max)
Output:
Example 2: Highlighting cells instead of text.
Python3
# Define a function which # returns string for # applymap() methoddef highlight_max(cell): if type(cell) != str and cell < 0 : return 'background: red; color:black' else: return 'background: black; color: white' df.style.applymap(highlight_max)
Output:
Note: pandas.DataFrame.applymap() method passes only single cell into the callable function while the pandas.DataFrame.apply() passes the pandas.Series to the callable function.
Reference: Styling in Pandas
Python pandas-dataFrame
Python Pandas-exercise
Python-pandas
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n20 Aug, 2020"
},
{
"code": null,
"e": 168,
"s": 28,
"text": "Letβs see various methods to Highlight the positive values red and negative values black in Pandas Dataframe.First, Letβs make a Dataframe:"
},
{
"code": null,
"e": 176,
"s": 168,
"text": "Python3"
},
{
"code": "# Import Required Librariesimport pandas as pdimport numpy as np # Create a dictionary for the dataframedict = { 'Name': ['Sukritin', 'Sumit Tyagi', 'Akriti Goel', 'Sanskriti', 'Abhishek Jain'], 'Age': [22, 20, 45, 21, 22], 'Marks': [90, 84, -33, -87, 82]} # Converting Dictionary to# Pandas Dataframedf = pd.DataFrame(dict) # Print Dataframeprint(df)",
"e": 558,
"s": 176,
"text": null
},
{
"code": null,
"e": 567,
"s": 558,
"text": "Output: "
},
{
"code": null,
"e": 680,
"s": 567,
"text": "Now, come to the highlighting part. Our objective is to highlight negative values red and positive values black."
},
{
"code": null,
"e": 721,
"s": 680,
"text": "Method 1: Using Dataframe.style.apply()."
},
{
"code": null,
"e": 806,
"s": 721,
"text": "Syntax: DataFrame.style.apply(self, func, axis=0, subset=None, **kwargs)Parameters: "
},
{
"code": null,
"e": 931,
"s": 806,
"text": "func: It should take a pandas.Series or pandas.DataFrame based on the axis and should return an object with the same shape. "
},
{
"code": null,
"e": 1115,
"s": 931,
"text": "axis: {0 or βindexβ, 1 or βcolumnsβ, None}, default 0. Apply to each column (axis=0 or βindexβ), to each row (axis=1 or βcolumnsβ), or to the entire DataFrame at once with axis=None. "
},
{
"code": null,
"e": 1183,
"s": 1115,
"text": "subset: Set of columns or rows on which you want to call the func. "
},
{
"code": null,
"e": 1213,
"s": 1183,
"text": "**kwargs: Pass along to func."
},
{
"code": null,
"e": 1237,
"s": 1213,
"text": "Returns: Styler object."
},
{
"code": null,
"e": 1267,
"s": 1237,
"text": "Example 1: Highlighting text."
},
{
"code": null,
"e": 1275,
"s": 1267,
"text": "Python3"
},
{
"code": "# Define a function for colouring # negative values red and # positive values blackdef highlight_max(s): if s.dtype == np.object: is_neg = [False for _ in range(s.shape[0])] else: is_neg = s < 0 return ['color: red;' if cell else 'color:black' for cell in is_neg] # Using apply method of style # attribute of Pandas DataFramedf.style.apply(highlight_max)",
"e": 1666,
"s": 1275,
"text": null
},
{
"code": null,
"e": 1675,
"s": 1666,
"text": "Output: "
},
{
"code": null,
"e": 1724,
"s": 1677,
"text": "Example 2: Highlighting cells instead of text."
},
{
"code": null,
"e": 1732,
"s": 1724,
"text": "Python3"
},
{
"code": "# Define a function which # returns the list for # df.style.apply() methoddef highlight_max(s): if s.dtype == np.object: is_neg = [False for _ in range(s.shape[0])] else: is_neg = s < 0 return ['background: red; color:white' if cell else 'background:black; color:white' for cell in is_neg] # Using apply method of style # attribute of Pandas DataFramedf.style.apply(highlight_max)",
"e": 2161,
"s": 1732,
"text": null
},
{
"code": null,
"e": 2170,
"s": 2161,
"text": "Output: "
},
{
"code": null,
"e": 2223,
"s": 2172,
"text": "Method 2: Using dataframe.style.applymap() method."
},
{
"code": null,
"e": 2303,
"s": 2223,
"text": "Syntax: DataFrame.style.applymap(self, func, subset=None, **kwargs)Parameters: "
},
{
"code": null,
"e": 2363,
"s": 2303,
"text": "func: It takes a scalar value and return the scalar values "
},
{
"code": null,
"e": 2431,
"s": 2363,
"text": "subset: Set of columns or rows on which you want to call the func. "
},
{
"code": null,
"e": 2461,
"s": 2431,
"text": "**kwargs: Pass along to func."
},
{
"code": null,
"e": 2485,
"s": 2461,
"text": "Returns: Styler object."
},
{
"code": null,
"e": 2515,
"s": 2485,
"text": "Example 1: Highlighting text."
},
{
"code": null,
"e": 2523,
"s": 2515,
"text": "Python3"
},
{
"code": "# Define a function for # colouring negative values # red and positive values blackdef highlight_max(cell): if type(cell) != str and cell < 0 : return 'color: red' else: return 'color: black' df.style.applymap(highlight_max)",
"e": 2769,
"s": 2523,
"text": null
},
{
"code": null,
"e": 2778,
"s": 2769,
"text": "Output: "
},
{
"code": null,
"e": 2827,
"s": 2780,
"text": "Example 2: Highlighting cells instead of text."
},
{
"code": null,
"e": 2835,
"s": 2827,
"text": "Python3"
},
{
"code": "# Define a function which # returns string for # applymap() methoddef highlight_max(cell): if type(cell) != str and cell < 0 : return 'background: red; color:black' else: return 'background: black; color: white' df.style.applymap(highlight_max)",
"e": 3101,
"s": 2835,
"text": null
},
{
"code": null,
"e": 3110,
"s": 3101,
"text": "Output: "
},
{
"code": null,
"e": 3289,
"s": 3110,
"text": " Note: pandas.DataFrame.applymap() method passes only single cell into the callable function while the pandas.DataFrame.apply() passes the pandas.Series to the callable function."
},
{
"code": null,
"e": 3319,
"s": 3289,
"text": "Reference: Styling in Pandas "
},
{
"code": null,
"e": 3343,
"s": 3319,
"text": "Python pandas-dataFrame"
},
{
"code": null,
"e": 3366,
"s": 3343,
"text": "Python Pandas-exercise"
},
{
"code": null,
"e": 3380,
"s": 3366,
"text": "Python-pandas"
},
{
"code": null,
"e": 3387,
"s": 3380,
"text": "Python"
}
] |
Web Scraping using R Language
|
28 Dec, 2021
One of the most important things in the field of Data Science is the skill of getting the right data for the problem you want to solve. Data Scientists don't always have a prepared database to work on but rather have to pull data from the right sources. For this purpose, APIs and Web Scraping are used.
API (Application Program Interface): An API is a set of methods and tools that allows one to query and retrieve data dynamically. Reddit, Spotify, Twitter, Facebook, and many other companies provide free APIs that enable developers to access the information they store on their servers; others charge for access to their APIs.
Web Scraping: A lot of data isn't accessible through data sets or APIs but rather exists on the internet as Web pages. So, through web-scraping, one can access the data without waiting for the provider to create an API.
Web scraping is a technique to fetch data from websites. While surfing on the web, many websites don't allow the user to save data for private use. One way is to manually copy-paste the data, which is both tedious and time-consuming. Web Scraping is the automatic process of data extraction from websites. This process is done with the help of web scraping software known as web scrapers. They automatically load and extract data from the websites based on user requirements. These can be custom built to work for one site or can be configured to work with any website.
There are several web scraping tools out there to perform the task and various languages too, having libraries that support web scraping. Among all these languages, R is considered as one of the programming languages for Web Scraping because of features like β a rich library, easy to use, dynamically typed, etc. The commonly used web Scraping tools for R is rvest.
Install the package rvest in your R Studio using the following code.
install.packages('rvest')
Having knowledge of HTML and CSS will be an added advantage. It's observed that most Data Scientists are not very familiar with the technical details of HTML and CSS. Therefore, let's use an open-source tool named Selector Gadget, which will be more than sufficient for anyone in order to perform Web scraping. One can access and download the Selector Gadget extension (https://selectorgadget.com/). Consider that one has this extension installed by following the instructions from the website. Also, consider one using Google Chrome; he/she can access the extension in the extension bar at the top right.
rvest maintained by the legendary Hadley Wickham. We can easily scrape data from webpage from this library.
Before starting we will import the rvest library into your code.
R
library(rvest)
Read the HTML code from the webpage using read_html(). Consider this webpage.
R
webpage = read_html("https://www.geeksforgeeks.org /\data-structures-in-r-programming")
Now, letβs start by scraping the heading field. For that, use the selector gadget to get the specific CSS selectors that enclose the heading. One can click on the extension in his/her browser and select the heading field with the cursor.
Once one knows the CSS selector that contains the heading, he/she can use this simple R code to get the heading.
R
# Using CSS selectors to scrape the heading sectionheading = html_node(webpage, '.entry-title') # Converting the heading data to texttext = html_text(heading)print(text)
Output:
[1] "Data Structures in R Programming"
Now, letβs scrape the all paragraph fields. For that did the same procedure as we did before.
Once one knows the CSS selector that contains the paragraphs, he/she can use this simple R code to get all the paragraphs.
R
# Using CSS selectors to scrape# all the paragraph section# Note that we use html_nodes() hereparagraph = html_nodes(webpage, 'p') # Converting the heading data to textpText = html_text(paragraph) # Print the top 6 dataprint(head(pText))
Output:
[1] βA data structure is a particular way of organizing data in a computer so that it can be used effectively. The idea is to reduce the space and time complexities of different tasks. Data structures in R programming are tools for holding multiple values. β [2] βRβs base data structures are often organized by their dimensionality (1D, 2D, or nD) and whether theyβre homogeneous (all elements must be of the identical type) or heterogeneous (the elements are often of various types). This gives rise to the five data types which are most frequently utilized in data analysis. the subsequent table shows a transparent cut view of those data structures.β [3] βThe most essential data structures used in R include:β [4] ββ [5] βA vector is an ordered collection of basic data types of a given length. The only key thing here is all the elements of a vector must be of the identical data type e.g homogeneous data structures. Vectors are one-dimensional data structures.β [6] βExample:β
R
# R program to illustrate# Web Scraping # Import rvest librarylibrary(rvest) # Reading the HTML code from the websitewebpage = read_html("https://www.geeksforgeeks.org/data-structures-in-r-programming") # Using CSS selectors to scrape the heading sectionheading = html_node(webpage, '.entry-title') # Converting the heading data to texttext = html_text(heading)print(text) # Using CSS selectors to scrape# all the paragraph section# Note that we use html_nodes() hereparagraph = html_nodes(webpage, 'p') # Converting the heading data to textpText = html_text(paragraph) # Print the top 6 dataprint(head(pText))
Output:
[1] βData Structures in R Programmingβ
[1] βA data structure is a particular way of organizing data in a computer so that it can be used effectively. The idea is to reduce the space and time complexities of different tasks. Data structures in R programming are tools for holding multiple values. β [2] βRβs base data structures are often organized by their dimensionality (1D, 2D, or nD) and whether theyβre homogeneous (all elements must be of the identical type) or heterogeneous (the elements are often of various types). This gives rise to the five data types which are most frequently utilized in data analysis. the subsequent table shows a transparent cut view of those data structures.β [3] βThe most essential data structures used in R include:β [4] ββ [5] βA vector is an ordered collection of basic data types of a given length. The only key thing here is all the elements of a vector must be of the identical data type e.g homogeneous data structures. Vectors are one-dimensional data structures.β [6] βExample:β
sooda367
kumar_satyam
data-science
R-dataStructures
R Language
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
|
[
{
"code": null,
"e": 28,
"s": 0,
"text": "\n28 Dec, 2021"
},
{
"code": null,
"e": 332,
"s": 28,
"text": "One of the most important things in the field of Data Science is the skill of getting the right data for the problem you want to solve. Data Scientists donβt always have a prepared database to work on but rather have to pull data from the right sources. For this purpose, APIs and Web Scraping are used."
},
{
"code": null,
"e": 661,
"s": 332,
"text": "API (Application Program Interface): An API is a set of methods and tools that allows oneβs to query and retrieve data dynamically. Reddit, Spotify, Twitter, Facebook, and many other companies provide free APIs that enable developers to access the information they store on their servers; others charge for access to their APIs."
},
{
"code": null,
"e": 881,
"s": 661,
"text": "Web Scraping: A lot of data isnβt accessible through data sets or APIs but rather exists on the internet as Web pages. So, through web-scraping, one can access the data without waiting for the provider to create an API."
},
{
"code": null,
"e": 1449,
"s": 881,
"text": "Web scraping is a technique to fetch data from websites. While surfing on the web, many websites donβt allow the user to save data for private use. One way is to manually copy-paste the data, which both tedious and time-consuming. Web Scraping is the automatic process of data extraction from websites. This process is done with the help of web scraping software known as web scrapers. They automatically load and extract data from the websites based on user requirements. These can be custom built to work for one site or can be configured to work with any website. "
},
{
"code": null,
"e": 1816,
"s": 1449,
"text": "There are several web scraping tools out there to perform the task and various languages too, having libraries that support web scraping. Among all these languages, R is considered as one of the programming languages for Web Scraping because of features like β a rich library, easy to use, dynamically typed, etc. The commonly used web Scraping tools for R is rvest."
},
{
"code": null,
"e": 1886,
"s": 1816,
"text": "Install the package rvest in your R Studio using the following code. "
},
{
"code": null,
"e": 1912,
"s": 1886,
"text": "install.packages('rvest')"
},
{
"code": null,
"e": 2529,
"s": 1912,
"text": "Having, knowledge of HTML and CSS will be an added advantage. Itβs observed that most of the Data Scientists are not very familiar with technical knowledge of HTML and CSS. Therefore, letβs use an open-source software named Selector Gadget which will be more than sufficient for anyone in order to perform Web scraping. One can access and download the Selector Gadget extension(https://selectorgadget.com/). Consider that one has this extension installed by following the instructions from the website. Also, consider one using Google chrome and he/she can access the extension in the extension bar to the top right."
},
{
"code": null,
"e": 2637,
"s": 2529,
"text": "rvest maintained by the legendary Hadley Wickham. We can easily scrape data from webpage from this library."
},
{
"code": null,
"e": 2702,
"s": 2637,
"text": "Before starting we will import the rvest library into your code."
},
{
"code": null,
"e": 2704,
"s": 2702,
"text": "R"
},
{
"code": "library(rvest)",
"e": 2719,
"s": 2704,
"text": null
},
{
"code": null,
"e": 2798,
"s": 2719,
"text": "Read the HTML code from the webpage using read_html(). Consider this webpage. "
},
{
"code": null,
"e": 2800,
"s": 2798,
"text": "R"
},
{
"code": "webpage = read_html(\"https://www.geeksforgeeks.org /\\data-structures-in-r-programming\")",
"e": 2888,
"s": 2800,
"text": null
},
{
"code": null,
"e": 3126,
"s": 2888,
"text": "Now, letβs start by scraping the heading field. For that, use the selector gadget to get the specific CSS selectors that enclose the heading. One can click on the extension in his/her browser and select the heading field with the cursor."
},
{
"code": null,
"e": 3239,
"s": 3126,
"text": "Once one knows the CSS selector that contains the heading, he/she can use this simple R code to get the heading."
},
{
"code": null,
"e": 3241,
"s": 3239,
"text": "R"
},
{
"code": "# Using CSS selectors to scrape the heading sectionheading = html_node(webpage, '.entry-title') # Converting the heading data to texttext = html_text(heading)print(text)",
"e": 3411,
"s": 3241,
"text": null
},
{
"code": null,
"e": 3420,
"s": 3411,
"text": "Output: "
},
{
"code": null,
"e": 3459,
"s": 3420,
"text": "[1] \"Data Structures in R Programming\""
},
{
"code": null,
"e": 3553,
"s": 3459,
"text": "Now, letβs scrape the all paragraph fields. For that did the same procedure as we did before."
},
{
"code": null,
"e": 3676,
"s": 3553,
"text": "Once one knows the CSS selector that contains the paragraphs, he/she can use this simple R code to get all the paragraphs."
},
{
"code": null,
"e": 3678,
"s": 3676,
"text": "R"
},
{
"code": "# Using CSS selectors to scrape# all the paragraph section# Note that we use html_nodes() hereparagraph = html_nodes(webpage, 'p') # Converting the heading data to textpText = html_text(paragraph) # Print the top 6 dataprint(head(pText))",
"e": 3916,
"s": 3678,
"text": null
},
{
"code": null,
"e": 3924,
"s": 3916,
"text": "Output:"
},
{
"code": null,
"e": 4910,
"s": 3924,
"text": "[1] βA data structure is a particular way of organizing data in a computer so that it can be used effectively. The idea is to reduce the space and time complexities of different tasks. Data structures in R programming are tools for holding multiple values. β [2] βRβs base data structures are often organized by their dimensionality (1D, 2D, or nD) and whether theyβre homogeneous (all elements must be of the identical type) or heterogeneous (the elements are often of various types). This gives rise to the five data types which are most frequently utilized in data analysis. the subsequent table shows a transparent cut view of those data structures.β [3] βThe most essential data structures used in R include:β [4] ββ [5] βA vector is an ordered collection of basic data types of a given length. The only key thing here is all the elements of a vector must be of the identical data type e.g homogeneous data structures. Vectors are one-dimensional data structures.β [6] βExample:β "
},
{
"code": null,
"e": 4912,
"s": 4910,
"text": "R"
},
{
"code": "# R program to illustrate# Web Scraping # Import rvest librarylibrary(rvest) # Reading the HTML code from the websitewebpage = read_html(\"https://www.geeksforgeeks.org /data-structures-in-r-programming\") # Using CSS selectors to scrape the heading sectionheading = html_node(webpage, '.entry-title') # Converting the heading data to texttext = html_text(heading)print(text) # Using CSS selectors to scrape# all the paragraph section# Note that we use html_nodes() hereparagraph = html_nodes(webpage, 'p') # Converting the heading data to textpText = html_text(paragraph) # Print the top 6 dataprint(head(pText))",
"e": 5524,
"s": 4912,
"text": null
},
{
"code": null,
"e": 5532,
"s": 5524,
"text": "Output:"
},
{
"code": null,
"e": 5571,
"s": 5532,
"text": "[1] βData Structures in R Programmingβ"
},
{
"code": null,
"e": 6557,
"s": 5571,
"text": "[1] βA data structure is a particular way of organizing data in a computer so that it can be used effectively. The idea is to reduce the space and time complexities of different tasks. Data structures in R programming are tools for holding multiple values. β [2] βRβs base data structures are often organized by their dimensionality (1D, 2D, or nD) and whether theyβre homogeneous (all elements must be of the identical type) or heterogeneous (the elements are often of various types). This gives rise to the five data types which are most frequently utilized in data analysis. the subsequent table shows a transparent cut view of those data structures.β [3] βThe most essential data structures used in R include:β [4] ββ [5] βA vector is an ordered collection of basic data types of a given length. The only key thing here is all the elements of a vector must be of the identical data type e.g homogeneous data structures. Vectors are one-dimensional data structures.β [6] βExample:β "
},
{
"code": null,
"e": 6566,
"s": 6557,
"text": "sooda367"
},
{
"code": null,
"e": 6579,
"s": 6566,
"text": "kumar_satyam"
},
{
"code": null,
"e": 6592,
"s": 6579,
"text": "data-science"
},
{
"code": null,
"e": 6609,
"s": 6592,
"text": "R-dataStructures"
},
{
"code": null,
"e": 6620,
"s": 6609,
"text": "R Language"
}
] |
How to check which Azure account is logged in using PowerShell?
|
To check the logged-in Azure user account in the console using PowerShell, you can check the Azure context; for that, the Get-AzContext command is used.
Get-AzContext
If you are already logged in with multiple user accounts then there may be chances that there are multiple contexts available, to list all the available context, use the below command,
Get-AzContext -ListAvailable
You can choose the context using the Select-AzContext command.
|
[
{
"code": null,
"e": 1220,
"s": 1062,
"text": "To check the logged-in Azure user account in the console using PowerShell, you can check the context of the Azure and for that Get-AZContext command is used."
},
{
"code": null,
"e": 1234,
"s": 1220,
"text": "Get-AzContext"
},
{
"code": null,
"e": 1419,
"s": 1234,
"text": "If you are already logged in with multiple user accounts then there may be chances that there are multiple contexts available, to list all the available context, use the below command,"
},
{
"code": null,
"e": 1448,
"s": 1419,
"text": "Get-AzContext -ListAvailable"
},
{
"code": null,
"e": 1511,
"s": 1448,
"text": "You can choose the context using the Select-AZContext command."
}
] |
Lucene - Field Options
|
Field is the most important unit of the indexing process. It is the actual object containing the contents to be indexed. When we add a field, Lucene provides numerous controls on the field using the Field Options which state how much a field is to be searchable.
We add Document(s) containing Field(s) to IndexWriter where IndexWriter is used to update or create indexes.
We will now show you a step-wise approach and help you understand the various Field Options using a basic example.
Following are the various field options β
Index.ANALYZED β In this, we first analyze, then do indexing. This is used for normal text indexing. Analyzer will break the field's value into stream of tokens and each token is searchable separately.
Index.ANALYZED β In this, we first analyze, then do indexing. This is used for normal text indexing. Analyzer will break the field's value into stream of tokens and each token is searchable separately.
Index.NOT_ANALYZED β In this, we do not analyze but do indexing. This is used for complete text indexing. For example, person's names, URL etc.
Index.NOT_ANALYZED β In this, we do not analyze but do indexing. This is used for complete text indexing. For example, person's names, URL etc.
Index.ANALYZED_NO_NORMS β This is a variant of Index.ANALYZED. The Analyzer will break the field's value into stream of tokens and each token is searchable separately. However, the NORMs are not stored in the indexes. NORMS are used to boost searching and this often ends up consuming a lot of memory.
Index.ANALYZED_NO_NORMS β This is a variant of Index.ANALYZED. The Analyzer will break the field's value into stream of tokens and each token is searchable separately. However, the NORMs are not stored in the indexes. NORMS are used to boost searching and this often ends up consuming a lot of memory.
Index.Index.NOT_ANALYZED_NO_NORMS β This is variant of Index.NOT_ANALYZED. Indexing is done but NORMS are not stored in the indexes.
Index.Index.NOT_ANALYZED_NO_NORMS β This is variant of Index.NOT_ANALYZED. Indexing is done but NORMS are not stored in the indexes.
Index.NO β Field value is not searchable.
Index.NO β Field value is not searchable.
Following are the different ways in which the Field Options can be used β
To create a method to get a Lucene document from a text file.
To create a method to get a Lucene document from a text file.
To create various types of fields which are key value pairs containing keys as names and values as contents to be indexed.
To create various types of fields which are key value pairs containing keys as names and values as contents to be indexed.
To set field to be analyzed or not. In our case, only content is to be analyzed as it can contain data such as a, am, are, an, etc. which are not required in search operations.
To set field to be analyzed or not. In our case, only content is to be analyzed as it can contain data such as a, am, are, an, etc. which are not required in search operations.
To add the newly-created fields to the document object and return it to the caller method.
To add the newly-created fields to the document object and return it to the caller method.
private Document getDocument(File file) throws IOException {
Document document = new Document();
//index file contents
Field contentField = new Field(LuceneConstants.CONTENTS,
new FileReader(file));
//index file name
Field fileNameField = new Field(LuceneConstants.FILE_NAME,
file.getName(),
Field.Store.YES,Field.Index.NOT_ANALYZED);
//index file path
Field filePathField = new Field(LuceneConstants.FILE_PATH,
file.getCanonicalPath(),
Field.Store.YES,Field.Index.NOT_ANALYZED);
document.add(contentField);
document.add(fileNameField);
document.add(filePathField);
return document;
}
To test the indexing process, we need to create a Lucene application test.
Create a project with a name LuceneFirstApplication under a package com.tutorialspoint.lucene as explained in the Lucene - First Application chapter. You can also use the project created in EJB - First Application chapter as such for this chapter to understand the indexing process.
Create LuceneConstants.java,TextFileFilter.java and Indexer.java as explained in the Lucene - First Application chapter. Keep the rest of the files unchanged.
Create LuceneTester.java as mentioned below.
Clean and Build the application to make sure the business logic is working as per the requirements.
This class is used to provide various constants to be used across the sample application.
package com.tutorialspoint.lucene;
public class LuceneConstants {
public static final String CONTENTS = "contents";
public static final String FILE_NAME = "filename";
public static final String FILE_PATH = "filepath";
public static final int MAX_SEARCH = 10;
}
This class is used as a .txt file filter.
package com.tutorialspoint.lucene;
import java.io.File;
import java.io.FileFilter;
public class TextFileFilter implements FileFilter {
@Override
public boolean accept(File pathname) {
return pathname.getName().toLowerCase().endsWith(".txt");
}
}
This class is used to index the raw data so that we can make it searchable using the Lucene library.
package com.tutorialspoint.lucene;
import java.io.File;
import java.io.FileFilter;
import java.io.FileReader;
import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
public class Indexer {
private IndexWriter writer;
public Indexer(String indexDirectoryPath) throws IOException {
//this directory will contain the indexes
Directory indexDirectory =
FSDirectory.open(new File(indexDirectoryPath));
//create the indexer
writer = new IndexWriter(indexDirectory,
new StandardAnalyzer(Version.LUCENE_36),true,
IndexWriter.MaxFieldLength.UNLIMITED);
}
public void close() throws CorruptIndexException, IOException {
writer.close();
}
private Document getDocument(File file) throws IOException {
Document document = new Document();
//index file contents
Field contentField = new Field(LuceneConstants.CONTENTS,
new FileReader(file));
//index file name
Field fileNameField = new Field(LuceneConstants.FILE_NAME,
file.getName(),
Field.Store.YES,Field.Index.NOT_ANALYZED);
//index file path
Field filePathField = new Field(LuceneConstants.FILE_PATH,
file.getCanonicalPath(),
Field.Store.YES,Field.Index.NOT_ANALYZED);
document.add(contentField);
document.add(fileNameField);
document.add(filePathField);
return document;
}
private void indexFile(File file) throws IOException {
System.out.println("Indexing "+file.getCanonicalPath());
Document document = getDocument(file);
writer.addDocument(document);
}
public int createIndex(String dataDirPath, FileFilter filter)
throws IOException {
//get all files in the data directory
File[] files = new File(dataDirPath).listFiles();
for (File file : files) {
if(!file.isDirectory()
&& !file.isHidden()
&& file.exists()
&& file.canRead()
&& filter.accept(file)
){
indexFile(file);
}
}
return writer.numDocs();
}
}
This class is used to test the indexing capability of the Lucene library.
package com.tutorialspoint.lucene;
import java.io.IOException;
public class LuceneTester {
String indexDir = "E:\\Lucene\\Index";
String dataDir = "E:\\Lucene\\Data";
Indexer indexer;
public static void main(String[] args) {
LuceneTester tester;
try {
tester = new LuceneTester();
tester.createIndex();
} catch (IOException e) {
e.printStackTrace();
}
}
private void createIndex() throws IOException {
indexer = new Indexer(indexDir);
int numIndexed;
long startTime = System.currentTimeMillis();
numIndexed = indexer.createIndex(dataDir, new TextFileFilter());
long endTime = System.currentTimeMillis();
indexer.close();
System.out.println(numIndexed+" File indexed, time taken: "
+(endTime-startTime)+" ms");
}
}
We have used 10 text files from record1.txt to record10.txt containing names and other details of the students and put them in the directory E:\Lucene\Data. Test Data. An index directory path should be created as E:\Lucene\Index. After running this program, you can see the list of index files created in that folder.
Once you are done with the creation of the source, the raw data, the data directory and the index directory, you can compile and run your program. To do this, keep the LuceneTester.Java file tab active and use either the Run option available in the Eclipse IDE or use Ctrl + F11 to compile and run your LuceneTester application. If your application runs successfully, it will print the following message in Eclipse IDE's console β
Indexing E:\Lucene\Data\record1.txt
Indexing E:\Lucene\Data\record10.txt
Indexing E:\Lucene\Data\record2.txt
Indexing E:\Lucene\Data\record3.txt
Indexing E:\Lucene\Data\record4.txt
Indexing E:\Lucene\Data\record5.txt
Indexing E:\Lucene\Data\record6.txt
Indexing E:\Lucene\Data\record7.txt
Indexing E:\Lucene\Data\record8.txt
Indexing E:\Lucene\Data\record9.txt
10 File indexed, time taken: 109 ms
Once you've run the program successfully, you will have following content in your index directory β
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2106,
"s": 1843,
"text": "Field is the most important unit of the indexing process. It is the actual object containing the contents to be indexed. When we add a field, Lucene provides numerous controls on the field using the Field Options which state how much a field is to be searchable."
},
{
"code": null,
"e": 2215,
"s": 2106,
"text": "We add Document(s) containing Field(s) to IndexWriter where IndexWriter is used to update or create indexes."
},
{
"code": null,
"e": 2330,
"s": 2215,
"text": "We will now show you a step-wise approach and help you understand the various Field Options using a basic example."
},
{
"code": null,
"e": 2372,
"s": 2330,
"text": "Following are the various field options β"
},
{
"code": null,
"e": 2574,
"s": 2372,
"text": "Index.ANALYZED β In this, we first analyze, then do indexing. This is used for normal text indexing. Analyzer will break the field's value into stream of tokens and each token is searchable separately."
},
{
"code": null,
"e": 2776,
"s": 2574,
"text": "Index.ANALYZED β In this, we first analyze, then do indexing. This is used for normal text indexing. Analyzer will break the field's value into stream of tokens and each token is searchable separately."
},
{
"code": null,
"e": 2920,
"s": 2776,
"text": "Index.NOT_ANALYZED β In this, we do not analyze but do indexing. This is used for complete text indexing. For example, person's names, URL etc."
},
{
"code": null,
"e": 3064,
"s": 2920,
"text": "Index.NOT_ANALYZED β In this, we do not analyze but do indexing. This is used for complete text indexing. For example, person's names, URL etc."
},
{
"code": null,
"e": 3366,
"s": 3064,
"text": "Index.ANALYZED_NO_NORMS β This is a variant of Index.ANALYZED. The Analyzer will break the field's value into stream of tokens and each token is searchable separately. However, the NORMs are not stored in the indexes. NORMS are used to boost searching and this often ends up consuming a lot of memory."
},
{
"code": null,
"e": 3668,
"s": 3366,
"text": "Index.ANALYZED_NO_NORMS β This is a variant of Index.ANALYZED. The Analyzer will break the field's value into stream of tokens and each token is searchable separately. However, the NORMs are not stored in the indexes. NORMS are used to boost searching and this often ends up consuming a lot of memory."
},
{
"code": null,
"e": 3802,
"s": 3668,
"text": "Index.Index.NOT_ANALYZED_NO_NORMS β This is variant of Index.NOT_ANALYZED. Indexing is done but NORMS are not stored in the indexes."
},
{
"code": null,
"e": 3936,
"s": 3802,
"text": "Index.Index.NOT_ANALYZED_NO_NORMS β This is variant of Index.NOT_ANALYZED. Indexing is done but NORMS are not stored in the indexes."
},
{
"code": null,
"e": 3978,
"s": 3936,
"text": "Index.NO β Field value is not searchable."
},
{
"code": null,
"e": 4020,
"s": 3978,
"text": "Index.NO β Field value is not searchable."
},
{
"code": null,
"e": 4094,
"s": 4020,
"text": "Following are the different ways in which the Field Options can be used β"
},
{
"code": null,
"e": 4156,
"s": 4094,
"text": "To create a method to get a Lucene document from a text file."
},
{
"code": null,
"e": 4218,
"s": 4156,
"text": "To create a method to get a Lucene document from a text file."
},
{
"code": null,
"e": 4341,
"s": 4218,
"text": "To create various types of fields which are key value pairs containing keys as names and values as contents to be indexed."
},
{
"code": null,
"e": 4464,
"s": 4341,
"text": "To create various types of fields which are key value pairs containing keys as names and values as contents to be indexed."
},
{
"code": null,
"e": 4641,
"s": 4464,
"text": "To set field to be analyzed or not. In our case, only content is to be analyzed as it can contain data such as a, am, are, an, etc. which are not required in search operations."
},
{
"code": null,
"e": 4818,
"s": 4641,
"text": "To set field to be analyzed or not. In our case, only content is to be analyzed as it can contain data such as a, am, are, an, etc. which are not required in search operations."
},
{
"code": null,
"e": 4909,
"s": 4818,
"text": "To add the newly-created fields to the document object and return it to the caller method."
},
{
"code": null,
"e": 5000,
"s": 4909,
"text": "To add the newly-created fields to the document object and return it to the caller method."
},
{
"code": null,
"e": 5663,
"s": 5000,
"text": "private Document getDocument(File file) throws IOException {\n Document document = new Document();\n\n //index file contents\n Field contentField = new Field(LuceneConstants.CONTENTS, \n new FileReader(file));\n \n //index file name\n Field fileNameField = new Field(LuceneConstants.FILE_NAME,\n file.getName(),\n Field.Store.YES,Field.Index.NOT_ANALYZED);\n \n //index file path\n Field filePathField = new Field(LuceneConstants.FILE_PATH,\n file.getCanonicalPath(),\n Field.Store.YES,Field.Index.NOT_ANALYZED);\n\n document.add(contentField);\n document.add(fileNameField);\n document.add(filePathField);\n\n return document;\n} "
},
{
"code": null,
"e": 5738,
"s": 5663,
"text": "To test the indexing process, we need to create a Lucene application test."
},
{
"code": null,
"e": 6021,
"s": 5738,
"text": "Create a project with a name LuceneFirstApplication under a package com.tutorialspoint.lucene as explained in the Lucene - First Application chapter. You can also use the project created in EJB - First Application chapter as such for this chapter to understand the indexing process."
},
{
"code": null,
"e": 6180,
"s": 6021,
"text": "Create LuceneConstants.java,TextFileFilter.java and Indexer.java as explained in the Lucene - First Application chapter. Keep the rest of the files unchanged."
},
{
"code": null,
"e": 6225,
"s": 6180,
"text": "Create LuceneTester.java as mentioned below."
},
{
"code": null,
"e": 6325,
"s": 6225,
"text": "Clean and Build the application to make sure the business logic is working as per the requirements."
},
{
"code": null,
"e": 6415,
"s": 6325,
"text": "This class is used to provide various constants to be used across the sample application."
},
{
"code": null,
"e": 6689,
"s": 6415,
"text": "package com.tutorialspoint.lucene;\n\npublic class LuceneConstants {\n public static final String CONTENTS = \"contents\";\n public static final String FILE_NAME = \"filename\";\n public static final String FILE_PATH = \"filepath\";\n public static final int MAX_SEARCH = 10;\n}"
},
{
"code": null,
"e": 6731,
"s": 6689,
"text": "This class is used as a .txt file filter."
},
{
"code": null,
"e": 6995,
"s": 6731,
"text": "package com.tutorialspoint.lucene;\n\nimport java.io.File;\nimport java.io.FileFilter;\n\npublic class TextFileFilter implements FileFilter {\n\n @Override\n public boolean accept(File pathname) {\n return pathname.getName().toLowerCase().endsWith(\".txt\");\n }\n}"
},
{
"code": null,
"e": 7096,
"s": 6995,
"text": "This class is used to index the raw data so that we can make it searchable using the Lucene library."
},
{
"code": null,
"e": 9561,
"s": 7096,
"text": "package com.tutorialspoint.lucene;\n\nimport java.io.File;\nimport java.io.FileFilter;\nimport java.io.FileReader;\nimport java.io.IOException;\n\nimport org.apache.lucene.analysis.standard.StandardAnalyzer;\nimport org.apache.lucene.document.Document;\nimport org.apache.lucene.document.Field;\nimport org.apache.lucene.index.CorruptIndexException;\nimport org.apache.lucene.index.IndexWriter;\nimport org.apache.lucene.store.Directory;\nimport org.apache.lucene.store.FSDirectory;\nimport org.apache.lucene.util.Version;\n\npublic class Indexer {\n\n private IndexWriter writer;\n\n public Indexer(String indexDirectoryPath) throws IOException {\n //this directory will contain the indexes\n Directory indexDirectory = \n FSDirectory.open(new File(indexDirectoryPath));\n\n //create the indexer\n writer = new IndexWriter(indexDirectory, \n new StandardAnalyzer(Version.LUCENE_36),true,\n IndexWriter.MaxFieldLength.UNLIMITED);\n }\n\n public void close() throws CorruptIndexException, IOException {\n writer.close();\n }\n\n private Document getDocument(File file) throws IOException {\n Document document = new Document();\n\n //index file contents\n Field contentField = new Field(LuceneConstants.CONTENTS, \n new FileReader(file));\n \n //index file name\n Field fileNameField = new Field(LuceneConstants.FILE_NAME,\n file.getName(),\n Field.Store.YES,Field.Index.NOT_ANALYZED);\n \n //index file path\n Field filePathField = new Field(LuceneConstants.FILE_PATH,\n file.getCanonicalPath(),\n Field.Store.YES,Field.Index.NOT_ANALYZED);\n\n document.add(contentField);\n document.add(fileNameField);\n document.add(filePathField);\n\n return document;\n } \n\n private void indexFile(File file) throws IOException {\n System.out.println(\"Indexing \"+file.getCanonicalPath());\n Document document = getDocument(file);\n writer.addDocument(document);\n }\n\n public int createIndex(String dataDirPath, FileFilter filter) \n throws IOException {\n //get all files in the data directory\n 
File[] files = new File(dataDirPath).listFiles();\n\n for (File file : files) {\n if(!file.isDirectory()\n && !file.isHidden()\n && file.exists()\n && file.canRead()\n && filter.accept(file)\n ){\n indexFile(file);\n }\n }\n return writer.numDocs();\n }\n}"
},
{
"code": null,
"e": 9635,
"s": 9561,
"text": "This class is used to test the indexing capability of the Lucene library."
},
{
"code": null,
"e": 10485,
"s": 9635,
"text": "package com.tutorialspoint.lucene;\n\nimport java.io.IOException;\n\npublic class LuceneTester {\n\t\n String indexDir = \"E:\\\\Lucene\\\\Index\";\n String dataDir = \"E:\\\\Lucene\\\\Data\";\n Indexer indexer;\n \n public static void main(String[] args) {\n LuceneTester tester;\n try {\n tester = new LuceneTester();\n tester.createIndex();\n } catch (IOException e) {\n e.printStackTrace();\n } \n }\n\n private void createIndex() throws IOException {\n indexer = new Indexer(indexDir);\n int numIndexed;\n long startTime = System.currentTimeMillis();\t\n numIndexed = indexer.createIndex(dataDir, new TextFileFilter());\n long endTime = System.currentTimeMillis();\n indexer.close();\n System.out.println(numIndexed+\" File indexed, time taken: \"\n +(endTime-startTime)+\" ms\");\t\t\n }\n}"
},
{
"code": null,
"e": 10803,
"s": 10485,
"text": "We have used 10 text files from record1.txt to record10.txt containing names and other details of the students and put them in the directory E:\\Lucene\\Data. Test Data. An index directory path should be created as E:\\Lucene\\Index. After running this program, you can see the list of index files created in that folder."
},
{
"code": null,
"e": 11234,
"s": 10803,
"text": "Once you are done with the creation of the source, the raw data, the data directory and the index directory, you can compile and run your program. To do this, keep the LuceneTester.Java file tab active and use either the Run option available in the Eclipse IDE or use Ctrl + F11 to compile and run your LuceneTester application. If your application runs successfully, it will print the following message in Eclipse IDE's console β"
},
{
"code": null,
"e": 11632,
"s": 11234,
"text": "Indexing E:\\Lucene\\Data\\record1.txt\nIndexing E:\\Lucene\\Data\\record10.txt\nIndexing E:\\Lucene\\Data\\record2.txt\nIndexing E:\\Lucene\\Data\\record3.txt\nIndexing E:\\Lucene\\Data\\record4.txt\nIndexing E:\\Lucene\\Data\\record5.txt\nIndexing E:\\Lucene\\Data\\record6.txt\nIndexing E:\\Lucene\\Data\\record7.txt\nIndexing E:\\Lucene\\Data\\record8.txt\nIndexing E:\\Lucene\\Data\\record9.txt\n10 File indexed, time taken: 109 ms\n"
},
{
"code": null,
"e": 11732,
"s": 11632,
"text": "Once you've run the program successfully, you will have following content in your index directory β"
},
{
"code": null,
"e": 11739,
"s": 11732,
"text": " Print"
},
{
"code": null,
"e": 11750,
"s": 11739,
"text": " Add Notes"
}
] |
How to extract each (English) word from a string using regular expression in Java?
|
The regular expression "[a-zA-Z]+" matches one or more letters of the English alphabet. Therefore, to extract each word in the given input string —
Compile the above expression using the compile() method of the Pattern class.
Compile the above expression using the compile() method of the Pattern class.
Get the Matcher object by passing the required input string as a parameter to the matcher() method of the Pattern class.
Get the Matcher object by passing the required input string as a parameter to the matcher() method of the Pattern class.
Finally, for each match get the matched characters by invoking the group() method.
Finally, for each match get the matched characters by invoking the group() method.
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class EachWordExample {
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
System.out.println("Enter sample text: ");
String data = sc.nextLine();
String regex = "[a-zA-Z]+";
//Creating a pattern object
Pattern pattern = Pattern.compile(regex);
//Creating a Matcher object
Matcher matcher = pattern.matcher(data);
System.out.println("Words in the given String: ");
while(matcher.find()) {
System.out.println(matcher.group()+" ");
}
}
}
Enter sample text:
Hello this is a sample text
Words in the given String:
Hello
this
is
a
sample
text
|
[
{
"code": null,
"e": 1194,
"s": 1062,
"text": "The regular expression β[a-zA-Z]+β matches one or the English alphabet. Therefore, to extract each word in the given input string β"
},
{
"code": null,
"e": 1269,
"s": 1194,
"text": "Compile the above expression of the compile() method of the Pattern class."
},
{
"code": null,
"e": 1344,
"s": 1269,
"text": "Compile the above expression of the compile() method of the Pattern class."
},
{
"code": null,
"e": 1464,
"s": 1344,
"text": "Get the Matcher object bypassing the required input string as a parameter to the matcher() method of the Pattern class."
},
{
"code": null,
"e": 1584,
"s": 1464,
"text": "Get the Matcher object bypassing the required input string as a parameter to the matcher() method of the Pattern class."
},
{
"code": null,
"e": 1667,
"s": 1584,
"text": "Finally, for each match get the matched characters by invoking the group() method."
},
{
"code": null,
"e": 1750,
"s": 1667,
"text": "Finally, for each match get the matched characters by invoking the group() method."
},
{
"code": null,
"e": 2391,
"s": 1750,
"text": "import java.util.Scanner;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\npublic class EachWordExample {\n public static void main(String[] args) {\n Scanner sc = new Scanner(System.in);\n System.out.println(\"Enter sample text: \");\n String data = sc.nextLine();\n String regex = \"[a-zA-Z]+\";\n //Creating a pattern object\n Pattern pattern = Pattern.compile(regex);\n //Creating a Matcher object\n Matcher matcher = pattern.matcher(data);\n System.out.println(\"Words in the given String: \");\n while(matcher.find()) {\n System.out.println(matcher.group()+\" \");\n }\n }\n}"
},
{
"code": null,
"e": 2493,
"s": 2391,
"text": "Enter sample text:\nHello this is a sample text\nWords in the given String:\nHello\nthis\nis\na\nsample\ntext"
}
] |
How to use a pre-trained model (VGG) for image classification | by Dr. Saptarsi Goswami | Towards Data Science
|
Hi Guys, today I am going to talk about how to use a VGG Model as a pre-trained model. Letβs take tiny steps
VGG models are a type of CNN Architecture proposed by Karen Simonyan & Andrew Zisserman of Visual Geometry Group (VGG), Oxford University, which brought remarkable results for the ImageNet Challenge.
They experiment with 6 models, with different numbers of trainable layers. Based on the number of models the two most popular models are VGG16 and VGG19.
Before, we proceed, we should answer what is this CNN Architecture and also about ImageNet.
For interested readers, you can refer to the following table to know about all the ConvNet families that the authors experimented with.
Well, CNN is a specialized deep neural network model for handling image data.
It does not need the traditional image processing filters like the edge, histogram, texture, etc., rather on CNN, the filters are learnable. So, these need not be determined through trial and error.
CNN has two parts, the first part is a feature learning part and then there is a classification layer (Often referred to as the Fully Connected Layer)
The main two building blocks of the feature learning part are the convolution layer and pooling layers
Convolution Layer: The learnable filters or the feature extractors we talked about.
Pooling Layer: This does some spatial compression also brings about invariance. A car will be a car, even if it is rotated a little bit.
Figure 2, gives an architectural overview of CNN. Convolutions create feature maps, Pooling is achieved through subsampling.
In case you need a more detailed explanation, you can look here.
These are models, which are networks with a large number of parameters ( A Case in point is VGG16, which has 138 Million Parameters)
Generally, training such a network is time and resource-consuming
The pre-trained models for CV mostly are pretty general-purpose too
We can use directly use these models if we pick up any of the 1000 classes it is trained with
Even if itβs a little bit different, we can remove the top layer and train the weight of that layer only (Transfer Learning)
This was an initiative taken by Stanford Professor Fei-Fei Li in collaboration with wordnet from 2006. The image annotations were crowdsourced. This actually made the testbed of computer vision tasks really very robust, large, and expensive. Based on ImageNet a 1000 class classification challenge started with the name ImageNet Large Scale Visual Recognition Challenge (ILSVRC).
Actually, this competition is responsible for the birth of most of the prominent CNN models.
Step 1: Import the model
from keras.applications.vgg16 import VGG16model = VGG16(weights='imagenet')print(model.summary())
There are many other CNN models are available, which can be found here.
Step 2: Loading a sample image
from tensorflow.keras.preprocessing import imagefrom tensorflow.keras.applications.vgg16 import preprocess_input,decode_predictionsimport numpy as npimg_path = '/kaggle/input/images/dog.jpg'#There is an interpolation method to match the source size with the target size#image loaded in PIL (Python Imaging Library)img = image.load_img(img_path,color_mode='rgb', target_size=(224, 224))display(img)
The test image, that we are using a Golder Retriever, also please note the image is loaded in a Python Image Library (PIL) format
Step 3: Making the image size compatible with VGG16 input
# Converts a PIL Image to 3D Numy Arrayx = image.img_to_array(img)x.shape# Adding the fouth dimension, for number of imagesx = np.expand_dims(x, axis=0)
Here, the PIL Image is converted to a 3d Array first, an image in RGB format is a 3D Array. Then another dimension is added for a number of images. So, the input is actually a 4D array.
Step 4: Making the prediction
#mean centering with respect to Imagex = preprocess_input(x)features = model.predict(x)p = decode_predictions(features)
In this step a simple pre-processing of mean centering is done, then the prediction is made, and finally, the prediction, which is a probability distribution is decoded to comprehensible class names. We have used this in the default top-5 probable class mode.
Output
[[('n02099601', 'golden_retriever', 0.8579672), ('n02099267', 'flat-coated_retriever', 0.018425034), ('n04409515', 'tennis_ball', 0.01615624), ('n02099712', 'Labrador_retriever', 0.015078514), ('n02099849', 'Chesapeake_Bay_retriever', 0.012522769)]]
If we use a bar chart, this is how it will look like
So, without creating a model and training it, we could classify an image of Golder Retriever perfectly.
The pre-trained models are like magic, we can just download the models and start using them, even without any data and training.
If the source task and the target task is different then there is some similarity between the domains then we may have to train few layers, but still, it will not be so extensive as training from scratch and will need much less data
[1] https://www.kaggle.com/saptarsi/using-pre-trained-vgg-model
[2] Simonyan, Karen, and Andrew Zisserman. βVery deep convolutional networks for large-scale image recognition.β arXiv preprint arXiv:1409.1556 (2014).
|
[
{
"code": null,
"e": 281,
"s": 172,
"text": "Hi Guys, today I am going to talk about how to use a VGG Model as a pre-trained model. Letβs take tiny steps"
},
{
"code": null,
"e": 481,
"s": 281,
"text": "VGG models are a type of CNN Architecture proposed by Karen Simonyan & Andrew Zisserman of Visual Geometry Group (VGG), Oxford University, which brought remarkable results for the ImageNet Challenge."
},
{
"code": null,
"e": 635,
"s": 481,
"text": "They experiment with 6 models, with different numbers of trainable layers. Based on the number of models the two most popular models are VGG16 and VGG19."
},
{
"code": null,
"e": 727,
"s": 635,
"text": "Before, we proceed, we should answer what is this CNN Architecture and also about ImageNet."
},
{
"code": null,
"e": 863,
"s": 727,
"text": "For interested readers, you can refer to the following table to know about all the ConvNet families that the authors experimented with."
},
{
"code": null,
"e": 941,
"s": 863,
"text": "Well, CNN is a specialized deep neural network model for handling image data."
},
{
"code": null,
"e": 1140,
"s": 941,
"text": "It does not need the traditional image processing filters like the edge, histogram, texture, etc., rather on CNN, the filters are learnable. So, these need not be determined through trial and error."
},
{
"code": null,
"e": 1291,
"s": 1140,
"text": "CNN has two parts, the first part is a feature learning part and then there is a classification layer (Often referred to as the Fully Connected Layer)"
},
{
"code": null,
"e": 1394,
"s": 1291,
"text": "The main two building blocks of the feature learning part are the convolution layer and pooling layers"
},
{
"code": null,
"e": 1478,
"s": 1394,
"text": "Convolution Layer: The learnable filters or the feature extractors we talked about."
},
{
"code": null,
"e": 1615,
"s": 1478,
"text": "Pooling Layer: This does some spatial compression also brings about invariance. A car will be a car, even if it is rotated a little bit."
},
{
"code": null,
"e": 1740,
"s": 1615,
"text": "Figure 2, gives an architectural overview of CNN. Convolutions create feature maps, Pooling is achieved through subsampling."
},
{
"code": null,
"e": 1805,
"s": 1740,
"text": "In case you need a more detailed explanation, you can look here."
},
{
"code": null,
"e": 1938,
"s": 1805,
"text": "These are models, which are networks with a large number of parameters ( A Case in point is VGG16, which has 138 Million Parameters)"
},
{
"code": null,
"e": 2004,
"s": 1938,
"text": "Generally, training such a network is time and resource-consuming"
},
{
"code": null,
"e": 2072,
"s": 2004,
"text": "The pre-trained models for CV mostly are pretty general-purpose too"
},
{
"code": null,
"e": 2166,
"s": 2072,
"text": "We can use directly use these models if we pick up any of the 1000 classes it is trained with"
},
{
"code": null,
"e": 2291,
"s": 2166,
"text": "Even if itβs a little bit different, we can remove the top layer and train the weight of that layer only (Transfer Learning)"
},
{
"code": null,
"e": 2671,
"s": 2291,
"text": "This was an initiative taken by Stanford Professor Fei-Fei Li in collaboration with wordnet from 2006. The image annotations were crowdsourced. This actually made the testbed of computer vision tasks really very robust, large, and expensive. Based on ImageNet a 1000 class classification challenge started with the name ImageNet Large Scale Visual Recognition Challenge (ILSVRC)."
},
{
"code": null,
"e": 2764,
"s": 2671,
"text": "Actually, this competition is responsible for the birth of most of the prominent CNN models."
},
{
"code": null,
"e": 2789,
"s": 2764,
"text": "Step 1: Import the model"
},
{
"code": null,
"e": 2887,
"s": 2789,
"text": "from keras.applications.vgg16 import VGG16model = VGG16(weights='imagenet')print(model.summary())"
},
{
"code": null,
"e": 2959,
"s": 2887,
"text": "There are many other CNN models are available, which can be found here."
},
{
"code": null,
"e": 2990,
"s": 2959,
"text": "Step 2: Loading a sample image"
},
{
"code": null,
"e": 3388,
"s": 2990,
"text": "from tensorflow.keras.preprocessing import imagefrom tensorflow.keras.applications.vgg16 import preprocess_input,decode_predictionsimport numpy as npimg_path = '/kaggle/input/images/dog.jpg'#There is an interpolation method to match the source size with the target size#image loaded in PIL (Python Imaging Library)img = image.load_img(img_path,color_mode='rgb', target_size=(224, 224))display(img)"
},
{
"code": null,
"e": 3518,
"s": 3388,
"text": "The test image, that we are using a Golder Retriever, also please note the image is loaded in a Python Image Library (PIL) format"
},
{
"code": null,
"e": 3576,
"s": 3518,
"text": "Step 3: Making the image size compatible with VGG16 input"
},
{
"code": null,
"e": 3729,
"s": 3576,
"text": "# Converts a PIL Image to 3D Numy Arrayx = image.img_to_array(img)x.shape# Adding the fouth dimension, for number of imagesx = np.expand_dims(x, axis=0)"
},
{
"code": null,
"e": 3915,
"s": 3729,
"text": "Here, the PIL Image is converted to a 3d Array first, an image in RGB format is a 3D Array. Then another dimension is added for a number of images. So, the input is actually a 4D array."
},
{
"code": null,
"e": 3945,
"s": 3915,
"text": "Step 4: Making the prediction"
},
{
"code": null,
"e": 4065,
"s": 3945,
"text": "#mean centering with respect to Imagex = preprocess_input(x)features = model.predict(x)p = decode_predictions(features)"
},
{
"code": null,
"e": 4325,
"s": 4065,
"text": "In this step a simple pre-processing of mean centering is done, then the prediction is made, and finally, the prediction, which is a probability distribution is decoded to comprehensible class names. We have used this in the default top-5 probable class mode."
},
{
"code": null,
"e": 4332,
"s": 4325,
"text": "Output"
},
{
"code": null,
"e": 4586,
"s": 4332,
"text": "[[('n02099601', 'golden_retriever', 0.8579672), ('n02099267', 'flat-coated_retriever', 0.018425034), ('n04409515', 'tennis_ball', 0.01615624), ('n02099712', 'Labrador_retriever', 0.015078514), ('n02099849', 'Chesapeake_Bay_retriever', 0.012522769)]]"
},
{
"code": null,
"e": 4639,
"s": 4586,
"text": "If we use a bar chart, this is how it will look like"
},
{
"code": null,
"e": 4743,
"s": 4639,
"text": "So, without creating a model and training it, we could classify an image of Golder Retriever perfectly."
},
{
"code": null,
"e": 4872,
"s": 4743,
"text": "The pre-trained models are like magic, we can just download the models and start using them, even without any data and training."
},
{
"code": null,
"e": 5105,
"s": 4872,
"text": "If the source task and the target task is different then there is some similarity between the domains then we may have to train few layers, but still, it will not be so extensive as training from scratch and will need much less data"
},
{
"code": null,
"e": 5169,
"s": 5105,
"text": "[1] https://www.kaggle.com/saptarsi/using-pre-trained-vgg-model"
}
] |
TDA To Rule Them All: ToMATo Clustering | by Meryll Dindin | Towards Data Science
|
Do you miss applied mathematics ? Once again, my goal is to promote Topological Data Analysis and the multiple possibilities it offers. The previous article mentioned machine-learning and deep-learning, but there is a field among others in which TDA find purposes: clustering.
towardsdatascience.com
The concept of clustering powered by TDA has been introduced into the (not so) famous ToMATo (Topological Mode Analysis Tool) algorithm, introduced in this paper. This algorithm has two strengths, compared to more usual (scikit-learn kind) clustering algorithms:
It gives you a way to know how many clusters your data seems to have;
It gives you a way to gather this data into distinct sets for clustering.
I developed the code relative to this article as a proof of concept, and there are surely elements that may be improved ! Do not ditch taking a look, and Iβll be happy to receive your critics ;) !
For explanation purposes, I will limit myself to two-dimensional considerations. The core idea is to apply TDA on the density estimate of our data points, to extract its maxima, and thus extract its apparent centroids. Great, but how ?
To get back to practical TDA, youβll need to build the simplex tree* (nested family of simplicial complex*, ending up in basically being a graph), corresponding to that density function.
First, compute the density estimate and initialize the structure;
Second, give an index to every point of your data set;
Third, insert each data point through their index into your simplex tree by attributing them the corresponding density value as filtration* value;
Fourth, link each point to its neighborhood graph through their averaged filtration values.
You end up with an instantiated graph and you can compute the corresponding persistence by upper-levels filtering (~ defines the nested family of simplicial complex through a decreasing ordering of the obtained filtration values), for which further illustrations are given on my Github repository.
(*) See Below for Concept Explanations
By computing the persistence diagram (and persistence barcode) of such filtration, you obtain something like that:
gen = ClusterGenerator(structure=βanisotropyβ, randomize=45)clf = ToMaTo(*gen.generate())_ = clf.estimate_clusters(graph=True)
The previous results gives you real insights about the data: Two elements do show off of the diagonal, corresponding to objects with the biggest persistence. Persistence, in this case, characterizes how much the structure that has been created is difficult to overlap in your graph. It basically refers to global maxima (for upper-levels filtering) or global minima (for sub-levels filtering). In our case, we just observe that your data owns two centroids, corresponding to two density peaks. Now, you need to link each data point to those two centroids to build your distinct clusters. This is achievable thanks to an UnionFind structure (disjoint-set data structure), and follows the idea presented in the algorithm below.
The concept is solely based on the decreasing order of the filtration values. A good point is that you will just need a one-pass over all your data points. For each ordered data point, you then have two possibilities:
either it has no neighbors that do have higher filtration values, then it is considered as a local density maximum;
nor it has, and you need to look over the different neighbors, and merge the roots to link that same edge to its corresponding root of heaviest weight (the closer to a given centroid, the higher the weight is).
To avoid the creation of multiple clusters, there is a condition over the filtration values given by tau, which makes sure small clusters will be linked to bigger ones. Pythonized, it gives you the following (extracted from a larger function, explaining the object methods):
After comparing the different roots/parents of each data point:
gen = ClusterGenerator(structure='moons', randomize=45)clf = ToMaTo(*gen.generate())_ = clf.estimate_density(graph=False)clf.estimate_clusters(graph=False)prd = clf.fit_predict(num_clusters=2, graph=True)
Other examples are provided in my Github repository, as the one dealing with anisotrope data, that k-means (one of the most commonly used clustering algorithm) has troubles to cluster (see scikit-learn example).
gen = ClusterGenerator(structure='anisotropy', randomize=45)clf = ToMaTo(*gen.generate())_ = clf.estimate_density(graph=False)clf.estimate_clusters(graph=False)prd = clf.fit_predict(num_clusters=3, graph=True)
For the ones interested about the inherent theory and its mathematical formalism, here are some elements that may talk to you.
There are lots of possibilities emerging from this theory, and the interface between those results and deep-learning are currently still under development. Lots of ideas are to come, which make the topic hot and really interesting ! Stay tuned for the incoming articles, and clap if you want more ;) !
Gudhi package
Persistence-Based Clustering in Riemannian Manifolds
The Simplex Tree: An Efficient Data Structure for General Simplicial Complexes
|
[
{
"code": null,
"e": 449,
"s": 172,
"text": "Do you miss applied mathematics ? Once again, my goal is to promote Topological Data Analysis and the multiple possibilities it offers. The previous article mentioned machine-learning and deep-learning, but there is a field among others in which TDA find purposes: clustering."
},
{
"code": null,
"e": 472,
"s": 449,
"text": "towardsdatascience.com"
},
{
"code": null,
"e": 735,
"s": 472,
"text": "The concept of clustering powered by TDA has been introduced into the (not so) famous ToMATo (Topological Mode Analysis Tool) algorithm, introduced in this paper. This algorithm has two strengths, compared to more usual (scikit-learn kind) clustering algorithms:"
},
{
"code": null,
"e": 805,
"s": 735,
"text": "It gives you a way to know how many clusters your data seems to have;"
},
{
"code": null,
"e": 879,
"s": 805,
"text": "It gives you a way to gather this data into distinct sets for clustering."
},
{
"code": null,
"e": 1076,
"s": 879,
"text": "I developed the code relative to this article as a proof of concept, and there are surely elements that may be improved ! Do not ditch taking a look, and Iβll be happy to receive your critics ;) !"
},
{
"code": null,
"e": 1312,
"s": 1076,
"text": "For explanation purposes, I will limit myself to two-dimensional considerations. The core idea is to apply TDA on the density estimate of our data points, to extract its maxima, and thus extract its apparent centroids. Great, but how ?"
},
{
"code": null,
"e": 1499,
"s": 1312,
"text": "To get back to practical TDA, youβll need to build the simplex tree* (nested family of simplicial complex*, ending up in basically being a graph), corresponding to that density function."
},
{
"code": null,
"e": 1565,
"s": 1499,
"text": "First, compute the density estimate and initialize the structure;"
},
{
"code": null,
"e": 1620,
"s": 1565,
"text": "Second, give an index to every point of your data set;"
},
{
"code": null,
"e": 1767,
"s": 1620,
"text": "Third, insert each data point through their index into your simplex tree by attributing them the corresponding density value as filtration* value;"
},
{
"code": null,
"e": 1859,
"s": 1767,
"text": "Fourth, link each point to its neighborhood graph through their averaged filtration values."
},
{
"code": null,
"e": 2157,
"s": 1859,
"text": "You end up with an instantiated graph and you can compute the corresponding persistence by upper-levels filtering (~ defines the nested family of simplicial complex through a decreasing ordering of the obtained filtration values), for which further illustrations are given on my Github repository."
},
{
"code": null,
"e": 2196,
"s": 2157,
"text": "(*) See Below for Concept Explanations"
},
{
"code": null,
"e": 2311,
"s": 2196,
"text": "By computing the persistence diagram (and persistence barcode) of such filtration, you obtain something like that:"
},
{
"code": null,
"e": 2438,
"s": 2311,
"text": "gen = ClusterGenerator(structure=βanisotropyβ, randomize=45)clf = ToMaTo(*gen.generate())_ = clf.estimate_clusters(graph=True)"
},
{
"code": null,
"e": 3164,
"s": 2438,
"text": "The previous results gives you real insights about the data: Two elements do show off of the diagonal, corresponding to objects with the biggest persistence. Persistence, in this case, characterizes how much the structure that has been created is difficult to overlap in your graph. It basically refers to global maxima (for upper-levels filtering) or global minima (for sub-levels filtering). In our case, we just observe that your data owns two centroids, corresponding to two density peaks. Now, you need to link each data point to those two centroids to build your distinct clusters. This is achievable thanks to an UnionFind structure (disjoint-set data structure), and follows the idea presented in the algorithm below."
},
{
"code": null,
"e": 3382,
"s": 3164,
"text": "The concept is solely based on the decreasing order of the filtration values. A good point is that you will just need a one-pass over all your data points. For each ordered data point, you then have two possibilities:"
},
{
"code": null,
"e": 3498,
"s": 3382,
"text": "either it has no neighbors that do have higher filtration values, then it is considered as a local density maximum;"
},
{
"code": null,
"e": 3709,
"s": 3498,
"text": "nor it has, and you need to look over the different neighbors, and merge the roots to link that same edge to its corresponding root of heaviest weight (the closer to a given centroid, the higher the weight is)."
},
{
"code": null,
"e": 3984,
"s": 3709,
"text": "To avoid the creation of multiple clusters, there is a condition over the filtration values given by tau, which makes sure small clusters will be linked to bigger ones. Pythonized, it gives you the following (extracted from a larger function, explaining the object methods):"
},
{
"code": null,
"e": 4048,
"s": 3984,
"text": "After comparing the different roots/parents of each data point:"
},
{
"code": null,
"e": 4253,
"s": 4048,
"text": "gen = ClusterGenerator(structure='moons', randomize=45)clf = ToMaTo(*gen.generate())_ = clf.estimate_density(graph=False)clf.estimate_clusters(graph=False)prd = clf.fit_predict(num_clusters=2, graph=True)"
},
{
"code": null,
"e": 4465,
"s": 4253,
"text": "Other examples are provided in my Github repository, as the one dealing with anisotrope data, that k-means (one of the most commonly used clustering algorithm) has troubles to cluster (see scikit-learn example)."
},
{
"code": null,
"e": 4675,
"s": 4465,
"text": "gen = ClusterGenerator(structure='anisotropy', randomize=45)clf = ToMaTo(*gen.generate())_ = clf.estimate_density(graph=False)clf.estimate_clusters(graph=False)prd = clf.fit_predict(num_clusters=3, graph=True)"
},
{
"code": null,
"e": 4802,
"s": 4675,
"text": "For the ones interested about the inherent theory and its mathematical formalism, here are some elements that may talk to you."
},
{
"code": null,
"e": 5104,
"s": 4802,
"text": "There are lots of possibilities emerging from this theory, and the interface between those results and deep-learning are currently still under development. Lots of ideas are to come, which make the topic hot and really interesting ! Stay tuned for the incoming articles, and clap if you want more ;) !"
},
{
"code": null,
"e": 5118,
"s": 5104,
"text": "Gudhi package"
},
{
"code": null,
"e": 5171,
"s": 5118,
"text": "Persistence-Based Clustering in Riemannian Manifolds"
}
] |
SQL Online Quiz
|
Following quiz provides Multiple Choice Questions (MCQs) related to SQL. You will have to read all the given answers and click over the correct answer. If you are not sure about the answer then you can check the answer using Show Answer button. You can use Next Quiz button to check new set of questions in the quiz.
Q 1 - Consider the following schema β
STUDENTS(student_code, first_name, last_name, email,
phone_no, date_of_birth, honours_subject, percentage_of_marks);
Which of the following query would display names of all the students whose honours subject is English, or honours subject is Spanish and percentage of marks more than 80?
A - select first_name, last name from students where (honours_subject = βEnglishβ or honours_subject = βSpanishβ ) and percentage_of_marks > 80;
B - select first_name, last name from students where honours_subject = βEnglishβ or honours_subject = βSpanishβ and percentage_of_marks > 80;
C - select first_name, last name from students where honours_subject = βEnglishβ and honours_subject = βSpanishβ or percentage_of_marks > 80;
D - select first_name, last name from students where (honours_subject = βEnglishβ) and honours_subject = βSpanishβ and percentage_of_marks > 80;
Q 2 - What is returned by TRUNC(789.8389, 2)?
A - 789.84
B - 789.83
C - 78
D - 789.00
Q 3 - Consider the following schema β
HONOURS_SUBJECT(subject_code, subject_name, department_head);
LOCATIONS(subject_code, department_name, location_id, city);
Select the right query for retrieving records from the tables HONOURS_SUBJECT and LOCATIONS with the USING clause
A - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h join location l using(subject_code);
B - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h natural join location l using(subject_code);
C - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h, location l using(subject_code);
D - None of the above.
Q 4 - Consider the following schema β
HONOURS_SUBJECT(subject_code, subject_name, department_head);
LOCATIONS(subject_code, department_name, location_id, city);
Select the right query for retrieving records from the tables HONOURS_SUBJECT and LOCATIONS with a full outer join
A - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h full outer join location l on(h.subject_code = l.subject_code);
B - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h full outer join location l on(subject_code);
C - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h full outer join location l where (h.subject_code = l.subject_code);
D - None of the above.
Q 5 - A subquery can be placed in which of the SQL clauses?
A - The WHERE clause
B - The HAVING clause
C - The FROM clause
D - All of the above.
Q 6 - In which of the following cases a DML statement is executed?
A - When new rows are added to a table.
B - When a table is created.
C - When a transaction is committed.
D - None of the above.
Q 7 - Which of the following is true about the SQL transaction control statements?
A - They ensure data consistency.
B - They allow preview of data changes before making permanent changes in data.
C - They group logically related operations.
D - All are true.
Q 8 - Which of the following is not true about complex views?
A - They derive data from more than one table.
B - They contain no functions or grouping.
C - You cannot perform DML operations through a complex view.
D - All of the above are true.
Q 9 - Which of the following is not true about the Pseudocolumns that return the sequence values?
A - NEXTVAL returns the next available sequence value.
B - CURRVAL gets the current sequence value.
C - PREVVAL gets the previous sequence value.
D - None of the above.
Q 10 - Which of the following is not a developerβs privilege?
A - CREATE USER
B - CREATE TABLE
C - CREATE VIEW
D - CREATE SEQUENCE
42 Lectures
5 hours
Anadi Sharma
14 Lectures
2 hours
Anadi Sharma
44 Lectures
4.5 hours
Anadi Sharma
94 Lectures
7 hours
Abhishek And Pukhraj
80 Lectures
6.5 hours
Oracle Master Training | 150,000+ Students Worldwide
31 Lectures
6 hours
Eduonix Learning Solutions
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2770,
"s": 2453,
"text": "Following quiz provides Multiple Choice Questions (MCQs) related to SQL. You will have to read all the given answers and click over the correct answer. If you are not sure about the answer then you can check the answer using Show Answer button. You can use Next Quiz button to check new set of questions in the quiz."
},
{
"code": null,
"e": 2808,
"s": 2770,
"text": "Q 1 - Consider the following schema β"
},
{
"code": null,
"e": 2935,
"s": 2808,
"text": "STUDENTS(student_code, first_name, last_name, email, \n phone_no, date_of_birth, honours_subject, percentage_of_marks);"
},
{
"code": null,
"e": 3106,
"s": 2935,
"text": "Which of the following query would display names of all the students whose honours subject is English, or honours subject is Spanish and percentage of marks more than 80?"
},
{
"code": null,
"e": 3251,
"s": 3106,
"text": "A - select first_name, last name from students where (honours_subject = βEnglishβ or honours_subject = βSpanishβ ) and percentage_of_marks > 80;"
},
{
"code": null,
"e": 3393,
"s": 3251,
"text": "B - select first_name, last name from students where honours_subject = βEnglishβ or honours_subject = βSpanishβ and percentage_of_marks > 80;"
},
{
"code": null,
"e": 3535,
"s": 3393,
"text": "C - select first_name, last name from students where honours_subject = βEnglishβ and honours_subject = βSpanishβ or percentage_of_marks > 80;"
},
{
"code": null,
"e": 3680,
"s": 3535,
"text": "D - select first_name, last name from students where (honours_subject = βEnglishβ) and honours_subject = βSpanishβ and percentage_of_marks > 80;"
},
{
"code": null,
"e": 3726,
"s": 3680,
"text": "Q 2 - What is returned by TRUNC(789.8389, 2)?"
},
{
"code": null,
"e": 3737,
"s": 3726,
"text": "A - 789.84"
},
{
"code": null,
"e": 3748,
"s": 3737,
"text": "B - 789.83"
},
{
"code": null,
"e": 3755,
"s": 3748,
"text": "C - 78"
},
{
"code": null,
"e": 3766,
"s": 3755,
"text": "D - 789.00"
},
{
"code": null,
"e": 3804,
"s": 3766,
"text": "Q 3 - Consider the following schema β"
},
{
"code": null,
"e": 3866,
"s": 3804,
"text": "HONOURS_SUBJECT(subject_code, subject_name, department_head);"
},
{
"code": null,
"e": 3927,
"s": 3866,
"text": "LOCATIONS(subject_code, department_name, location_id, city);"
},
{
"code": null,
"e": 4041,
"s": 3927,
"text": "Select the right query for retrieving records from the tables HONOURS_SUBJECT and LOCATIONS with the USING clause"
},
{
"code": null,
"e": 4173,
"s": 4041,
"text": "A - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h join location l using(subject_code);"
},
{
"code": null,
"e": 4313,
"s": 4173,
"text": "B - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h natural join location l using(subject_code);"
},
{
"code": null,
"e": 4442,
"s": 4313,
"text": "C - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h, location l using(subject_code);"
},
{
"code": null,
"e": 4465,
"s": 4442,
"text": "D - None of the above."
},
{
"code": null,
"e": 4503,
"s": 4465,
"text": "Q 4 - Consider the following schema β"
},
{
"code": null,
"e": 4565,
"s": 4503,
"text": "HONOURS_SUBJECT(subject_code, subject_name, department_head);"
},
{
"code": null,
"e": 4626,
"s": 4565,
"text": "LOCATIONS(subject_code, department_name, location_id, city);"
},
{
"code": null,
"e": 4741,
"s": 4626,
"text": "Select the right query for retrieving records from the tables HONOURS_SUBJECT and LOCATIONS with a full outer join"
},
{
"code": null,
"e": 4900,
"s": 4741,
"text": "A - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h full outer join location l on(h.subject_code = l.subject_code);"
},
{
"code": null,
"e": 5040,
"s": 4900,
"text": "B - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h full outer join location l on(subject_code);"
},
{
"code": null,
"e": 5204,
"s": 5040,
"text": "C - select h.subject_name, l.department_name, h.department_head, l.city from honours_subject h full outer join location l where (h.subject_code = l.subject_code);"
},
{
"code": null,
"e": 5227,
"s": 5204,
"text": "D - None of the above."
},
{
"code": null,
"e": 5287,
"s": 5227,
"text": "Q 5 - A subquery can be placed in which of the SQL clauses?"
},
{
"code": null,
"e": 5308,
"s": 5287,
"text": "A - The WHERE clause"
},
{
"code": null,
"e": 5330,
"s": 5308,
"text": "B - The HAVING clause"
},
{
"code": null,
"e": 5350,
"s": 5330,
"text": "C - The FROM clause"
},
{
"code": null,
"e": 5372,
"s": 5350,
"text": "D - All of the above."
},
{
"code": null,
"e": 5439,
"s": 5372,
"text": "Q 6 - In which of the following cases a DML statement is executed?"
},
{
"code": null,
"e": 5479,
"s": 5439,
"text": "A - When new rows are added to a table."
},
{
"code": null,
"e": 5508,
"s": 5479,
"text": "B - When a table is created."
},
{
"code": null,
"e": 5545,
"s": 5508,
"text": "C - When a transaction is committed."
},
{
"code": null,
"e": 5568,
"s": 5545,
"text": "D - None of the above."
},
{
"code": null,
"e": 5651,
"s": 5568,
"text": "Q 7 - Which of the following is true about the SQL transaction control statements?"
},
{
"code": null,
"e": 5685,
"s": 5651,
"text": "A - They ensure data consistency."
},
{
"code": null,
"e": 5765,
"s": 5685,
"text": "B - They allow preview of data changes before making permanent changes in data."
},
{
"code": null,
"e": 5810,
"s": 5765,
"text": "C - They group logically related operations."
},
{
"code": null,
"e": 5828,
"s": 5810,
"text": "D - All are true."
},
{
"code": null,
"e": 5890,
"s": 5828,
"text": "Q 8 - Which of the following is not true about complex views?"
},
{
"code": null,
"e": 5937,
"s": 5890,
"text": "A - They derive data from more than one table."
},
{
"code": null,
"e": 5980,
"s": 5937,
"text": "B - They contain no functions or grouping."
},
{
"code": null,
"e": 6042,
"s": 5980,
"text": "C - You cannot perform DML operations through a complex view."
},
{
"code": null,
"e": 6073,
"s": 6042,
"text": "D - All of the above are true."
},
{
"code": null,
"e": 6171,
"s": 6073,
"text": "Q 9 - Which of the following is not true about the Pseudocolumns that return the sequence values?"
},
{
"code": null,
"e": 6226,
"s": 6171,
"text": "A - NEXTVAL returns the next available sequence value."
},
{
"code": null,
"e": 6271,
"s": 6226,
"text": "B - CURRVAL gets the current sequence value."
},
{
"code": null,
"e": 6317,
"s": 6271,
"text": "C - PREVVAL gets the previous sequence value."
},
{
"code": null,
"e": 6340,
"s": 6317,
"text": "D - None of the above."
},
{
"code": null,
"e": 6402,
"s": 6340,
"text": "Q 10 - Which of the following is not a developerβs privilege?"
},
{
"code": null,
"e": 6418,
"s": 6402,
"text": "A - CREATE USER"
},
{
"code": null,
"e": 6435,
"s": 6418,
"text": "B - CREATE TABLE"
},
{
"code": null,
"e": 6451,
"s": 6435,
"text": "C - CREATE VIEW"
},
{
"code": null,
"e": 6471,
"s": 6451,
"text": "D - CREATE SEQUENCE"
},
{
"code": null,
"e": 6504,
"s": 6471,
"text": "\n 42 Lectures \n 5 hours \n"
},
{
"code": null,
"e": 6518,
"s": 6504,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 6551,
"s": 6518,
"text": "\n 14 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 6565,
"s": 6551,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 6600,
"s": 6565,
"text": "\n 44 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 6614,
"s": 6600,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 6647,
"s": 6614,
"text": "\n 94 Lectures \n 7 hours \n"
},
{
"code": null,
"e": 6669,
"s": 6647,
"text": " Abhishek And Pukhraj"
},
{
"code": null,
"e": 6704,
"s": 6669,
"text": "\n 80 Lectures \n 6.5 hours \n"
},
{
"code": null,
"e": 6758,
"s": 6704,
"text": " Oracle Master Training | 150,000+ Students Worldwide"
},
{
"code": null,
"e": 6791,
"s": 6758,
"text": "\n 31 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 6819,
"s": 6791,
"text": " Eduonix Learning Solutions"
},
{
"code": null,
"e": 6826,
"s": 6819,
"text": " Print"
},
{
"code": null,
"e": 6837,
"s": 6826,
"text": " Add Notes"
}
] |
PyCaret Time Series Module Architecture Overview | by Nikhil Gupta | Towards Data Science
|
Understanding the underlying architecture of any software package goes a long way in making sure we can use it to the best possible extent. It does not mean that one must be aware of every line of code in it, but sometimes, just having an overview can help.
This article aims to provide an architectural overview of the pycaret time series module and shows examples where this information might come in handy while evaluating the model developed by pycaret.
If you have not already done so, I would recommend the following short read. It talks about how pycaret uses regression based forecasting models (something that we will talk about later in this article)
Reduced Regression Models for Time Series Forecasting
The pycaret time series module is built on top of sktime which is a unified framework for time series analysis. sktime aims to do for time series analysis what sklearn did for machine learning. You can read more about it here if you wish, but it is not required for this article as I will give a quick overview.
sktime provides a framework to:
Create time series models with sklearn regressors using the reduced regression technique (see suggested previous read).Create models pipelines with transformations akin to what sklearn provides.Connect to other time series packages (such as statsmodels, pmdarima, tbats, prophet, etc) using adapters.Allow users to create their own forecasting models using extension templates.
Create time series models with sklearn regressors using the reduced regression technique (see suggested previous read).
Create models pipelines with transformations akin to what sklearn provides.
Connect to other time series packages (such as statsmodels, pmdarima, tbats, prophet, etc) using adapters.
Allow users to create their own forecasting models using extension templates.
While a user can use the sktime library directly to create models, managing the workflow and model comparison process still needs to be handled manually (similar to what you would do if building models in sklearn directly). Thankfully, pycaret provides a convenient way to do this in a few lines of code by wrapping these models, pipelines, and adapters in convenient framework as shown below.
#### Create different types of models ----# ARIMA model from `pmdarima`arima_model = exp.create_model("arima")# ETS and Exponential Smoothing models from `statsmodels`ets_model = exp.create_model("ets")exp_smooth_model = exp.create_model("exp_smooth")# Reduced Regression model using `sklearn` Linear Regressionlr_model = exp.create_model("lr_cds_dt")
So when you create a time series model in pycaret, you get back one of these sktime adapters, pipelines or a sktime compatible model that you developed yourself.
#### Check model types ----print(type(arima_model)) # sktime `pmdarima` adapter print(type(ets_model)) # sktime `statsmodels` adapterprint(type(exp_smooth_model)) # sktime `statsmodels` adapterprint(type(lr_model)) # Your custom sktime compatible model
But there is so much more information that one can extract from these models than meets the eye. For example, if the model that you created using pycaret is called model, the underlying wrapped library model, sktime pipeline or your custom sktime compatible model can be extracted with ease by calling model._forecaster.
#### Access internal models using `_forecaster` ----print(type(arima_model._forecaster))print(type(ets_model._forecaster))print(type(exp_smooth_model._forecaster))print(type(lr_model._forecaster))
From this point onward, you can extract valuable information about your model using either the native library methods/functions or sktime wrappers.
#### What hyperparameters were used to train the model? ----print(arima_model)#### Access statistical fit properties using underlying `pmdarima`arima_model._forecaster.summary()#### Alternately, use sktime's convenient wrapper to do so ---- arima_model.summary()
For example, the above image shows us that the ARIMA model was built with the requirement of an intercept. The fit returned an intercept value of 5.798. We will discuss these statistical details in another post (see Suggested Next Reads), but for now, just know that this information can be readily accessed.
Similarly, we can extract information about pipelines using methods that are similar to how one would do this in sklearn.
#### sktime pipelines are similar to sklearn.#### Access steps using `named_steps` attributeprint(lr_model._forecaster.named_steps.keys())
So this model is actually a pipeline with 3 steps — a conditional deseasonalizer followed by a detrender, followed by the actual forecaster. You can get more details about these steps by just calling named_steps. For example, we can see that the forecaster is actually a regression based model using sklearn LinearRegression. This is what we asked for when we built the lr_cds_dt model (lr stands for Linear Regression, cds stands for Conditional Deseasonalizer and dt stands for Detrender).
#### Details about the steps ----pprint(lr_model._forecaster.named_steps)
While pycaret provides a low code environment to create and manage modeling workflows, there is a lot more that can be done if we look under the hood. This article just scratches the surface of the possibilities. In future articles, we will look at how we can use pycaret to understand the working of the underlying models such as ARIMA. Until then, if you would like to connect with me on my social channels (I post about Time Series Analysis frequently), you can find me below. That's it for now. Happy forecasting!
LinkedIn
Twitter
GitHub
Jupyter Notebook (can be opened in Google Colab) containing the code for this article
Understanding ARIMA Models using PyCaret's Time Series Module — Part 1
Adding Custom Time Series Models to PyCaret
|
[
{
"code": null,
"e": 430,
"s": 172,
"text": "Understanding the underlying architecture of any software package goes a long way in making sure we can use it to the best possible extent. It does not mean that one must be aware of every line of code in it, but sometimes, just having an overview can help."
},
{
"code": null,
"e": 630,
"s": 430,
"text": "This article aims to provide an architectural overview of the pycaret time series module and shows examples where this information might come in handy while evaluating the model developed by pycaret."
},
{
"code": null,
"e": 833,
"s": 630,
"text": "If you have not already done so, I would recommend the following short read. It talks about how pycaret uses regression based forecasting models (something that we will talk about later in this article)"
},
{
"code": null,
"e": 889,
"s": 833,
"text": "π Reduced Regression Models for Time Series Forecasting"
},
{
"code": null,
"e": 1201,
"s": 889,
"text": "The pycaret time series module is built on top of sktime which is a unified framework for time series analysis. sktime aims to do for time series analysis what sklearn did for machine learning. You can read more about it here if you wish, but it is not required for this article as I will give a quick overview."
},
{
"code": null,
"e": 1233,
"s": 1201,
"text": "sktime provides a framework to:"
},
{
"code": null,
"e": 1611,
"s": 1233,
"text": "Create time series models with sklearn regressors using the reduced regression technique (see suggested previous read).Create models pipelines with transformations akin to what sklearn provides.Connect to other time series packages (such as statsmodels, pmdarima, tbats, prophet, etc) using adapters.Allow users to create their own forecasting models using extension templates."
},
{
"code": null,
"e": 1731,
"s": 1611,
"text": "Create time series models with sklearn regressors using the reduced regression technique (see suggested previous read)."
},
{
"code": null,
"e": 1807,
"s": 1731,
"text": "Create models pipelines with transformations akin to what sklearn provides."
},
{
"code": null,
"e": 1914,
"s": 1807,
"text": "Connect to other time series packages (such as statsmodels, pmdarima, tbats, prophet, etc) using adapters."
},
{
"code": null,
"e": 1992,
"s": 1914,
"text": "Allow users to create their own forecasting models using extension templates."
},
{
"code": null,
"e": 2386,
"s": 1992,
"text": "While a user can use the sktime library directly to create models, managing the workflow and model comparison process still needs to be handled manually (similar to what you would do if building models in sklearn directly). Thankfully, pycaret provides a convenient way to do this in a few lines of code by wrapping these models, pipelines, and adapters in convenient framework as shown below."
},
{
"code": null,
"e": 2738,
"s": 2386,
"text": "#### Create different types of models ----# ARIMA model from `pmdarima`arima_model = exp.create_model(\"arima\")# ETS and Exponential Smoothing models from `statsmodels`ets_model = exp.create_model(\"ets\")exp_smooth_model = exp.create_model(\"exp_smooth\")# Reduced Regression model using `sklearn` Linear Regressionlr_model = exp.create_model(\"lr_cds_dt\")"
},
{
"code": null,
"e": 2900,
"s": 2738,
"text": "So when you create a time series model in pycaret, you get back one of these sktime adapters, pipelines or a sktime compatible model that you developed yourself."
},
{
"code": null,
"e": 3173,
"s": 2900,
"text": "#### Check model types ----print(type(arima_model)) # sktime `pmdarima` adapter print(type(ets_model)) # sktime `statsmodels` adapterprint(type(exp_smooth_model)) # sktime `statsmodels` adapterprint(type(lr_model)) # Your custom sktime compatible model"
},
{
"code": null,
"e": 3494,
"s": 3173,
"text": "But there is so much more information that one can extract from these models than meets the eye. For example, if the model that you created using pycaret is called model, the underlying wrapped library model, sktime pipeline or your custom sktime compatible model can be extracted with ease by calling model._forecaster."
},
{
"code": null,
"e": 3691,
"s": 3494,
"text": "#### Access internal models using `_forecaster` ----print(type(arima_model._forecaster))print(type(ets_model._forecaster))print(type(exp_smooth_model._forecaster))print(type(lr_model._forecaster))"
},
{
"code": null,
"e": 3839,
"s": 3691,
"text": "From this point onward, you can extract valuable information about your model using either the native library methods/functions or sktime wrappers."
},
{
"code": null,
"e": 4102,
"s": 3839,
"text": "#### What hyperparameters were used to train the model? ----print(arima_model)#### Access statistical fit properties using underlying `pmdarima`arima_model._forecaster.summary()#### Alternately, use sktime's convenient wrapper to do so ---- arima_model.summary()"
},
{
"code": null,
"e": 4411,
"s": 4102,
"text": "For example, the above image shows us that the ARIMA model was built with the requirement of an intercept. The fit returned an intercept value of 5.798. We will discuss these statistical details in another post (see Suggested Next Reads), but for now, just know that this information can be readily accessed."
},
{
"code": null,
"e": 4533,
"s": 4411,
"text": "Similarly, we can extract information about pipelines using methods that are similar to how one would do this in sklearn."
},
{
"code": null,
"e": 4672,
"s": 4533,
"text": "#### sktime pipelines are similar to sklearn.#### Access steps using `named_steps` attributeprint(lr_model._forecaster.named_steps.keys())"
},
{
"code": null,
"e": 5162,
"s": 4672,
"text": "So this model is actually a pipeline with 3 steps β a conditional deseasonalizer followed by a detrender, followed by the actual forecaster. You can get more details about these steps by just called named_steps. For example, we can see that the forecaster is actually a regression based model using sklearn LinearRegression. This is what we asked for when we built the lr_cds_dt model (lr stands for Linear Regression, cds stands for Conditional Deseasonalizer and dt stand for Detrender)."
},
{
"code": null,
"e": 5236,
"s": 5162,
"text": "#### Details about the steps ----pprint(lr_model._forecaster.named_steps)"
},
{
"code": null,
"e": 5754,
"s": 5236,
"text": "While pycaret provides a low code environment to create and manage modeling workflows, there is a lot more that can be done if we look under the hood. This article just scratches the surface of the possibilities. In future articles, we will look at how we can use pycaret to understand the working of the underlying models such as ARIMA. Until then, if you would like to connect with me on my social channels (I post about Time Series Analysis frequently), you can find me below. Thatβs it for now. Happy forecasting!"
},
{
"code": null,
"e": 5765,
"s": 5754,
"text": "π LinkedIn"
},
{
"code": null,
"e": 5775,
"s": 5765,
"text": "π¦ Twitter"
},
{
"code": null,
"e": 5784,
"s": 5775,
"text": "π GitHub"
},
{
"code": null,
"e": 5870,
"s": 5784,
"text": "Jupyter Notebook (can be opened in Google Colab) containing the code for this article"
},
{
"code": null,
"e": 5941,
"s": 5870,
"text": "Understanding ARIMA Models using PyCaretβs Time Series Module β Part 1"
}
] |
Interpreting Data through Visualization with Python Matplotlib | by Saptashwa Bhattacharyya | Towards Data Science
|
Matplotlib even though is aging, still remains as one of the most vital tools for data visualization, and this post is about using matplotlib effectively, to gain knowledge from a data-set. The IBM data science professional certificate program, which I have started taking around a month back, I found the data-visualization course as a part of 9 courses and this post is a run-through of some powerful techniques that I learnt in the course to elucidate data better. Some useful plotting techniques and new inferences from data are shown here that are not used in the course itself. You will find the codes in detail on my GitHub, where I have shared Jupyter notebook. Rather than the codes, I will focus more on the plots and at times share snippets of code.
The data-set deals with immigration to Canada from various countries over the years 1980 to 2013.
As the data-set is in .xlsx format and there are 3 sheets in the file, below is a portion of notebook that guides you how to read this file into a data-frame
skiprows is for taking care of the initial useless rows in the excel sheet. It is better to rename the columns (using pandas.DataFrame.rename) "OdName" and "AreaName" to "Country_Name" and "Continents" respectively, for better understanding, and inplace=True makes sure that the changes are saved in the data-frame. If you don't want to make this change in the original data-frame you should use inplace=False .
Once we are set and done after a few more tweaks, let's plot using pandas DataFrame.plot and first we will try some bar plots. If we plot the number of immigrants from Haiti over the years then we can see a surprising rising trend of increasing immigrants near 2009, 2010, 2011.
Some of you have already guessed it right: due to the catastrophic Haiti earthquake in 2010, the number of immigrants sharply increased in 2011. Let's make a clear representation with awesome annotation
A very similar trend can be seen with the number of immigrants from Iceland as the Icelandic financial crisis (2008β2011) led to severe economic depression.
We can go on to see such trends for separate countries using bar plots but, let's explore another way to visualize data, using pie plots.
Pie plot is a kind of circular graphic and the slices in this circular plot represent numeric proportions. Here we can see how the numeric proportions of immigrants from different continents varied over 20 years (1985 and 2005) using a pie plot. However, effective representation is the issue. Let's see below the code and corresponding plot
As you can see these pie charts are visually not pleasing and even though we get a rough idea about how the percentage of immigrants from different continents varied over a span of 20 years, it is still not much self-explanatory. Using the right keywords though can make the pie charts a whole lot better.
The code snippet used to plot the pie charts above is given below —
Few of the important keywords that I learnt are autopct and pctdistance which make sure that percentages are shown up to 2 decimal places (%1.3f will show float numbers up to 3 decimal places) and fix the text distance from the center of circle. For making a title including sub-plots I have used matplotlib.pyplot.suptitle.
From the plots above you can see that in 1985 a significant portion of immigrants were from Europe, compared to 20 years later in 2005 it is completely dominated by Asia. Actually in the early days the immigrants were mostly from British isles, later on India and China took over that spot.
Bubble plots are basically glorified scatter plots where 3 dimensions of data can be displayed in a 2D plot. Apart from usual X and Y, the size of the bubble (or any other marker) represent another dimension (read feature).
To see an example of such plots, I have selected immigration information of India and China over the years 1980 to 2013. We see a jump in numbers around 1997–1998, which could possibly be attributed to the Asian financial crisis. Let's see it below
If you notice the star markers (representing immigrants from India) they got bigger and the color changed from purple to blue over the years. Let's see the code snippet below to understand what are exactly represented by the size and color of the marker
In plt.scatter s and c represent the size and color of the the markers. Particularly for this plot to represent Indian immigrants, I've made use of both these parameters. Here s is the normalized value of immigrants over the years (multiplied by 2000 so that the marker sizes are big enough) and c, cmap represent just the raw number of immigrants. So a higher number of immigrants is represented by blue and the opposite with purple.
One very gripping part of the course was using Folium library, which helps to create several types of interactive Leaflet maps. We will see using the same immigration data-set, how well one can represent some crucial information in the World Map. First we get started with installing Folium.
pip install Folium print folium.__version__ # check the version >> 0.7.0
The special kind of map we are interested in are called Choropleth. It's a kind of thematic map where the portion of the map is shaded/patterned according to the proportion of the statistical variable used. Here, I will plot how the number of immigrants varied from all over the world from the year 1985 to the year 2005. The pie plot we have seen before for continents, can be used to complement this Choropleth map.
Now to create a Choropleth map we need a .json file containing the border coordinates of all countries and this file is provided by IBM, which you can get from my GitHub. With this I used the code snippet below to plot a choropleth map of immigrants all over the world in 1985. These maps are interactive so you can zoom in or out but here I just show a screenshot —
In the code above, key_on relates to the country names in .json file and for data the data-frame we are interested in is passed.
Following the same procedure we create another Choropleth map representing immigrants to Canada from all over the world in 2005 and you can clearly see the difference.
One big drawback you can see in the plots above is that the United Kingdom color didn't change from 1985 to 2005 even if we know from the data-frame that the number of immigrants was quite high in the 1980s. The problem is, in the data-frame the country name is "United Kingdom of Great Britain and Northern Ireland", whereas in the .json file it is just United Kingdom. So one can use the replace option inside the pandas data-frame in the "Country_Name" column —
Canada_df_world_map.Country_Name = Canada_df_world_map.Country_Name.replace({"United Kingdom of Great Britain and Northern Ireland": "United Kingdom"})Canada_df_world_map.tail(20)
It is a crude replacement, because "UK" and "GRB + Northern Ireland" aren't the same, but we can just plot to verify our understanding, so let's check below.
Well, it is finally time to wrap up this post as it is getting longer, but I hope you get a pretty decent idea about effective data visualization, i.e. how to tell stories with a beautiful presentation. This post covers most of the fundamental techniques that I have learnt in the Data Visualization course offered by IBM in Coursera. Just to review how good it is, I have to say that the labs, where one can directly play with all the techniques taught in lessons, are the most effective and fruitful part of the course. Sometimes, the course material contains some printing mistakes but they are getting addressed slowly. Learning to plot a waffle chart was great too but, I leave that for students who took the course exclusively. Overall, the experience was quite fun, especially for reviewing some of the fundamentals within a week, it is a great course!
Discover more with the complete Jupyter notebook in my GitHub including the data-set used here.
Find me in LinkedIn and sometimes I post cool pics in national geographic.
|
[
{
"code": null,
"e": 932,
"s": 171,
"text": "Matplotlib even though is aging, still remains as one of the most vital tools for data visualization, and this post is about using matplotlib effectively, to gain knowledge from a data-set. The IBM data science professional certificate program, which I have started taking around a month back, I found the data-visualization course as a part of 9 courses and this post is a run-through of some powerful techniques that I learnt in the course to elucidate data better. Some useful plotting techniques and new inferences from data are shown here that are not used in the course itself. You will find the codes in detail on my GitHub, where I have shared Jupyter notebook. Rather than the codes, I will focus more on the plots and at times share snippets of code."
},
{
"code": null,
"e": 1030,
"s": 932,
"text": "The data-set deals with immigration to Canada from various countries over the years 1980 to 2013."
},
{
"code": null,
"e": 1188,
"s": 1030,
"text": "As the data-set is in .xlsx format and there are 3 sheets in the file, below is a portion of notebook that guides you how to read this file into a data-frame"
},
{
"code": null,
"e": 1599,
"s": 1188,
"text": "skiprows is for taking care of the initial useless rows in the excel sheet. It is better to rename the column (using pandas.DataFrame.rename) βOdNameβ and βAreaNameβ to βCountry_Nameβ and βContinentsβ respectively, for better understanding, and inplace=True makes sure that the changes are saved in the data-frame. If you donβt want to make this change in the original data-frame you should use inplace=False ."
},
{
"code": null,
"e": 1875,
"s": 1599,
"text": "Once we are set and done after few more tweaks, letβs plot using pandas DataFrame.plot and first we will try some bar plots.If we plot the number of immigrants from Haiti over the years then we can see a surprising rising trend of increasing immigrants near 2009, 2010, 2011."
},
{
"code": null,
"e": 2070,
"s": 1875,
"text": "Some of you have already guessed it right, due to catastrophic Haiti earthquake in 2010, number of immigrants sharply increased in 2011. Letβs make a clear representation with awesome annotation"
},
{
"code": null,
"e": 2227,
"s": 2070,
"text": "A very similar trend can be seen with the number of immigrants from Iceland as the Icelandic financial crisis (2008β2011) led to severe economic depression."
},
{
"code": null,
"e": 2365,
"s": 2227,
"text": "We can go on to see such trends for separate countries using bar plots but, letβs explore another way to visualize data, using pie plots."
},
{
"code": null,
"e": 2707,
"s": 2365,
"text": "Pie plot is a kind of circular graphic and the slices in this circular plot represent numeric proportions. Here we can see how the numeric proportions of immigrants from different continents varied over 20 years (1985 and 2005) using a pie plot. However, effective representation is the issue. Letβs see below the code and corresponding plot"
},
{
"code": null,
"e": 3013,
"s": 2707,
"text": "As you can see these pie charts are visually not pleasing and even though we get a rough idea about how the percentage of immigrants from different continents varied over a span of 20 years, it is still not much self-explanatory. Using the right keywords though can make the pie charts a whole lot better."
},
{
"code": null,
"e": 3081,
"s": 3013,
"text": "The code snippet used to plot the pie charts above is given below β"
},
{
"code": null,
"e": 3406,
"s": 3081,
"text": "Few of the important keywords that I learnt are autopct and pctdistance which make sure that percentages are shown up to 2 decimal places (%1.3f will show float numbers up to 3 decimal places) and fix the text distance from the center of circle. For making a title including sub-plots I have used matplotlib.pyplot.suptitle."
},
{
"code": null,
"e": 3697,
"s": 3406,
"text": "From the plots above you can see that in 1985 a significant portion of immigrants were from Europe, compared to 20 years later in 2005 it is completely dominated by Asia. Actually in the early days the immigrants were mostly from British isles, later on India and China took over that spot."
},
{
"code": null,
"e": 3921,
"s": 3697,
"text": "Bubble plots are basically glorified scatter plots where 3 dimensions of data can be displayed in a 2D plot. Apart from usual X and Y, the size of the bubble (or any other marker) represent another dimension (read feature)."
},
{
"code": null,
"e": 4170,
"s": 3921,
"text": "To see an example of such plots, I have selected immigration information of India and China over the years 1980 to 2013. We see a jump in numbers around 1997β1998, which could possibly be attributed to the Asian financial crisis. Letβs see it below"
},
{
"code": null,
"e": 4424,
"s": 4170,
"text": "If you notice the star markers (representing immigrants from India) they got bigger and the color changed from purple to blue over the years. Letβs see the code snippet below to understand what are exactly represented by the size and color of the marker"
},
{
"code": null,
"e": 4857,
"s": 4424,
"text": "In plt.scatter s and c represent the size and color of the the markers. Particularly for this plot to represent Indian immigrants, Iβve made use of both these parameters. Here s is the normalized value of immigrants over the years (multiplied by 2000 so that the marker sizes are big enough) and c, cmap represent just the raw number of immigrants. So higher the number of immigrants is represented by blue and opposite with purple."
},
{
"code": null,
"e": 5149,
"s": 4857,
"text": "One very gripping part of the course was using Folium library, which helps to create several types of interactive Leaflet maps. We will see using the same immigration data-set, how well one can represent some crucial information in the World Map. First we get started with installing Folium."
},
{
"code": null,
"e": 5222,
"s": 5149,
"text": "pip install Folium print folium.__version__ # check the version >> 0.7.0"
},
{
"code": null,
"e": 5640,
"s": 5222,
"text": "The special kind of map we are interested in are called Choropleth. Itβs a kind of thematic map where the portion of the map is shaded/patterned according to the proportion of the statistical variable used. Here, I will plot how the number of immigrants varied from all over the world from the year 1985 to the year 2005. The pie plot we have seen before for continents, can be used to complement this Choropleth map."
},
{
"code": null,
"e": 6006,
"s": 5640,
"text": "Now to create a Choropleth map we need a .json file containing the border coordinates of all countries and this file is provided by IBM, which you can get from my GitHub. With this I used the code snippet below to plot a choropleth map of immigrants all over the world in 1985. This maps are interactive so you can zoom in or out but here I just show a screenshot β"
},
{
"code": null,
"e": 6135,
"s": 6006,
"text": "In the code above, key_on relates to the country names in .json file and for data the data-frame we are interested in is passed."
},
{
"code": null,
"e": 6303,
"s": 6135,
"text": "Following the same procedure we create another Choropleth map representing immigrants to Canada from all over the world in 2005 and you can clearly see the difference."
},
{
"code": null,
"e": 6751,
"s": 6303,
"text": "One big drawback you can see in the plots above is that the United Kingdom color didnβt change from 1985 to 2005 even if we know from the data-frame that the number of immigrants were quite high in 1980s. The problem is, in the data-frame the country name is β βUnited Kingdom of Great Britain and Northern Irelandβ, whereas in .json file it is just United Kingdom. So one can use replace option inside pandas data-frame in βCountry_Nameβ column β"
},
{
"code": null,
"e": 6931,
"s": 6751,
"text": "Canada_df_world_map.Country_Name = Canada_df_world_map.Country_Name.replace({\"United Kingdom of Great Britain and Northern Ireland\": \"United Kingdom\"})Canada_df_world_map.tail(20)"
},
{
"code": null,
"e": 7088,
"s": 6931,
"text": "It is a crude replacement, because βUKβ and βGRB + Northern Irelandβ isnβt the same, but we can just plot to verify our understanding and letβs check below."
},
{
"code": null,
"e": 7952,
"s": 7088,
"text": "Well, it is finally time to wrap up this post as it is getting longer, but I hope you get a pretty descent idea about the effective data visualization, i.e. how to tell stories with a beautiful presentation. This post covers most of the fundamental techniques that I have learnt in the Data Visualization course offered by IBM in Coursera. Just to review how good it is, I have to say that the labs, where one can directly play with all the techniques taught in lessons, are the most effective and fruitful part of the course. Sometimes, the course material contain some printing mistakes but they are getting addressed slowly. Learning to plot a waffle chart was great too but, I leave that for students who took the course exclusively. Overall, the experience was quite fun, especially for reviewing some of the fundamentals within a week, it is a great course!"
},
{
"code": null,
"e": 8048,
"s": 7952,
"text": "Discover more with the complete Jupyter notebook in my GitHub including the data-set used here."
}
] |
JavaScript RegExp - [a-z]
|
[a-z] matches any character from lowercase a through lowercase z.
Following example shows usage of RegExp expression.
<html>
<head>
<title>JavaScript RegExp</title>
</head>
<body>
<script type = "text/javascript">
var str = "first";
var pattern = /[a-z]/g;
var result = str.match(pattern);
document.write("Test 1 - returned value : " + result);
str = "Second";
result = str.match(pattern);
document.write("<br/>Test 2 - returned value : " + result);
</script>
</body>
</html>
Test 1 - returned value : f,i,r,s,t
Test 2 - returned value : e,c,o,n,d
25 Lectures
2.5 hours
Anadi Sharma
74 Lectures
10 hours
Lets Kode It
72 Lectures
4.5 hours
Frahaan Hussain
70 Lectures
4.5 hours
Frahaan Hussain
46 Lectures
6 hours
Eduonix Learning Solutions
88 Lectures
14 hours
Eduonix Learning Solutions
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 3147,
"s": 3081,
"text": "[a-z] matches any character from lowercase a through lowercase z."
},
{
"code": null,
"e": 3199,
"s": 3147,
"text": "Following example shows usage of RegExp expression."
},
{
"code": null,
"e": 3665,
"s": 3199,
"text": "<html>\n <head>\n <title>JavaScript RegExp</title>\n </head>\n \n <body>\n <script type = \"text/javascript\">\n var str = \"first\";\n var pattern = /[a-z]/g;\n\n var result = str.match(pattern);\n document.write(\"Test 1 - returned value : \" + result); \n\n str = \"Second\";\n result = str.match(pattern);\n document.write(\"<br/>Test 2 - returned value : \" + result); \t \t\t \n </script>\n </body>\n</html>"
},
{
"code": null,
"e": 3739,
"s": 3665,
"text": "Test 1 - returned value : f,i,r,s,t\nTest 2 - returned value : e,c,o,n,d \n"
},
{
"code": null,
"e": 3774,
"s": 3739,
"text": "\n 25 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 3788,
"s": 3774,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 3822,
"s": 3788,
"text": "\n 74 Lectures \n 10 hours \n"
},
{
"code": null,
"e": 3836,
"s": 3822,
"text": " Lets Kode It"
},
{
"code": null,
"e": 3871,
"s": 3836,
"text": "\n 72 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 3888,
"s": 3871,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 3923,
"s": 3888,
"text": "\n 70 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 3940,
"s": 3923,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 3973,
"s": 3940,
"text": "\n 46 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 4001,
"s": 3973,
"text": " Eduonix Learning Solutions"
},
{
"code": null,
"e": 4035,
"s": 4001,
"text": "\n 88 Lectures \n 14 hours \n"
},
{
"code": null,
"e": 4063,
"s": 4035,
"text": " Eduonix Learning Solutions"
},
{
"code": null,
"e": 4070,
"s": 4063,
"text": " Print"
},
{
"code": null,
"e": 4081,
"s": 4070,
"text": " Add Notes"
}
] |
Customizable correlation heatmaps in R using purrr and ggplot2 | by Kat Hoffman | Towards Data Science
|
If youβre ever felt limited by correlation heat map packages in R, this post will show you how to write your own function to tidy the many correlations into a ggplot2-friendly form for plotting.
By the end, you will be able to run one function to get a tidied data frame of correlations. You can then run ggplot2 code on this data frame to make your own correlation heat maps.
If you just want the code, you can skip to the end. You can also read my other blog posts on my website, KHstats.
I really appreciate some of the packages and functions that allow me to make correlation plots super quickly using R. Here are a few examples:
corrplot::corrplot(cor(mtcars))
corrgram::corrgram(mtcars)
ggcorrplot::ggcorrplot(cor(mtcars))
All of these are nice, but none of them is ultimately as customizable as I need them to be. Iβll next show how you can bypass using someone elseβs function constraints to prepare correlations in your data in a ggplot2-friendly format.
We could use the base R function cor() to get our correlations, but I do not like the defaults for missing data. Instead, I use Frank Harrell's Hmisc::rcorr() function for two reasons:
it drops missing pairs as the defaultit returns p-values, so you only need one function to get both the correlation coefficient and matching p-value
it drops missing pairs as the default
it returns p-values, so you only need one function to get both the correlation coefficient and matching p-value
Letβs load the libraries weβll need for this, which are knitr for showing tables using kable, and tidyverse (we'll specifically use tidyr, dplyr, ggplot2, tibble and purrr).
library(knitr) library(tidyverse, warn.conflict=F)
First, letβs look at our output from our correlation function weβll use, Hmisc::rcorr(). It requires the input to be a matrix and outputs a list of three matrices.
mtcars_cor <- Hmisc::rcorr(as.matrix(mtcars))
These three matrices include the correlation coefficient (default is Pearsonβs), r, the p-value, P, and the number of observations used for each correlation, n. Let's turn each matrix into a data frame and look at the top six rows with head and kable.
The correlation coefficients, r:
data.frame(mtcars_cor$r) %>% head() %>% kable()
The p-values, P:
data.frame(mtcars_cor$P) %>% head() %>% kable()
The number of observations, n. There are no missing data in the mtcars data set so there are 32 pairs used for all correlations.
data.frame(mtcars_cor$n) %>% head(n=3) %>% kable()
Next, we can write a function that formats a data frame correctly for Hmisc::rcorr() and then turns each of the three elements of the list ( r, n and P)
cors <- function(df) { # turn all three matrices (r, n, and P into a data frame) M <- Hmisc::rcorr(as.matrix(df)) # return the three data frames in a list return(Mdf) Mdf <- map(M, ~data.frame(.x)) }
Nothing too crazy happened in this function. Now we just have a list of three data frames. We can look at the first element of our list using first(), which shows us the correlations between all our variables:
cors(mtcars) %>% first() %>% head() %>% kable()
The next step is to get the data ready for plotting with ggplot2. We can keep the data in a list for now and use the map() function from purrr.
First, we need to move the rownames to their own column using tibble::rownames_to_column(). The output of that looks like:
cors(mtcars) %>% map(~rownames_to_column(.x, var="measure1")) %>% # look at the first element of the list (r) first() %>% head() %>% kable()
Next, we can turn move of the columns to a single column called measure2 using tidyr::pivot_longer()
cors(mtcars) %>% map(~rownames_to_column(.x, var="measure1")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, "measure2")) %>% # look at the first element of the list (r) first() %>% head() %>% kable()
Now, weβre ready to unlist our data by using bind_rows(). This will turn our correlations into a very long data frame with all the rows from r, then n, then P.
cors(mtcars) %>% map(~rownames_to_column(.x, var="measure1")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, "measure2")) %>% # merge our three list elements by binding the rows bind_rows(.id = "id") %>% head() %>% kable()
For ggplot2, we'll need to have r, n, and P as their own column. We can use pivot_longer() to do this.
cors(mtcars) %>% map(~rownames_to_column(.x, var="measure1")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, "measure2")) %>% # merge our three list elements by binding the rows bind_rows(.id = "id") %>% pivot_wider(names_from = id, values_from = value) %>% head() %>% kable()
Finally, we can add a few columns that will potentially be useful later for making our correlation plots more informative. Letβs add columns that tell us whether the p-value was less than 0.05, and if so, give us back 1) the p-value and 2) the correlation coefficient, in case we want to label our plot with these values.
cors(mtcars) %>% map(~rownames_to_column(.x, var="measure1")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, "measure2")) %>% # merge our three list elements by binding the rows bind_rows(.id = "id") %>% pivot_wider(names_from = id, values_from = value) %>% mutate(sig_p = ifelse(P < .05, T, F), p_if_sig = ifelse(P <.05, P, NA), r_if_sig = ifelse(r <.05, r, NA)) %>% head() %>% kable()
This seems like everything I think Iβll ever want to plot. Of course, you could add more. At this point I turned my formatted correlations into a function:
formatted_cors <- function(df){ cors(df) %>% map(~rownames_to_column(.x, var="measure1")) %>% map(~pivot_longer(.x, -measure1, "measure2")) %>% bind_rows(.id = "id") %>% pivot_wider(names_from = id, values_from = value) %>% mutate(sig_p = ifelse(P < .05, T, F), p_if_sig = ifelse(P <.05, P, NA), r_if_sig = ifelse(P <.05, r, NA)) }
We can test the function works as expected:
formatted_cors(mtcars) %>% head() %>% kable()
Weβre finally ready to plot our correlation heat maps in ggplot2.
The simplest form of this plot only requires us to specify measure1 and measure2 on the x and y-axis, respectively. Then we can map the correlation r to the fill aesthetic, and add a tile as the geometry.
formatted_cors(mtcars) %>% ggplot(aes(x = measure1, y = measure2, fill = r)) + geom_tile()
We can make some minor aesthetic changes, such as the fill coloring scale, titles, and font family.
formatted_cors(mtcars) %>% ggplot(aes(x = measure1, y = measure2, fill = r)) + geom_tile() + labs(x = NULL, y = NULL, fill = "Pearson's\nCorrelation", title="Correlations in Mtcars") + # map a red, white and blue color scale to correspond to -1:1 sequential gradient scale_fill_gradient2(mid="#FBFEF9",low="#0C6291",high="#A63446", limits=c(-1,1)) + theme_classic() + # remove excess space on x and y axes scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + # change global font to roboto theme(text=element_text(family="Roboto"))
We can add the correlations for extra information. For this particular plot, I only added significant (p-value less than 0.05) correlations, using the column r_if_sig that outputs from formatted_cors().
formatted_cors(mtcars) %>% ggplot(aes(measure1, measure2, fill=r, label=round(r_if_sig,2))) + geom_tile() + labs(x = NULL, y = NULL, fill = "Pearson's\nCorrelation", title="Correlations in Mtcars", subtitle="Only significant Pearson's correlation coefficients shown") + scale_fill_gradient2(mid="#FBFEF9",low="#0C6291",high="#A63446", limits=c(-1,1)) + geom_text() + theme_classic() + scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + theme(text=element_text(family="Roboto"))
Another version of this could involve squares with different sizes to denote the strength of correlation using geom_point with shape set to a value from these available geom_shapes. Make sure you take the absolute value of the correlation so that strong negative correlations can also be denoted larger.
formatted_cors(mtcars) %>% ggplot(aes(measure1, measure2, col=r)) + ## to get the rect filled geom_tile(col="black", fill="white") + geom_point(aes(size = abs(r)), shape=15) + labs(x = NULL, y = NULL, col = "Pearson's\nCorrelation", title="Correlations in Mtcars") + theme_classic() + scale_color_gradient2(mid="#FBFEF9",low="#0C6291",high="#A63446", limits=c(-1,1)) + scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + theme(text=element_text(family="Roboto")) + scale_size(range=c(1,11), guide=NULL)
cors <- function(df) { M <- Hmisc::rcorr(as.matrix(df)) Mdf <- map(M, ~data.frame(.x)) return(Mdf) }formatted_cors <- function(df){ cors(df) %>% map(~rownames_to_column(.x, var="measure1")) %>% map(~pivot_longer(.x, -measure1, "measure2")) %>% bind_rows(.id = "id") %>% pivot_wider(names_from = id, values_from = value) %>% mutate(sig_p = ifelse(P < .05, T, F), p_if_sig = ifelse(P <.05, P, NA), r_if_sig = ifelse(P <.05, r, NA)) }formatted_cors(mtcars) %>% ggplot(aes(measure1, measure2, fill=r, label=round(r_if_sig,2))) + geom_tile() + labs(x = NULL, y = NULL, fill = "Pearson's\nCorrelation", title="Correlations in Mtcars", subtitle="Only significant Pearson's correlation coefficients shown") + scale_fill_gradient2(mid="#FBFEF9",low="#0C6291",high="#A63446", limits=c(-1,1)) + geom_text() + theme_classic() + scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + theme(text=element_text(family="Roboto"))
Originally published at https://www.khstats.com on March 15, 2020.
|
[
{
"code": null,
"e": 367,
"s": 172,
"text": "If youβre ever felt limited by correlation heat map packages in R, this post will show you how to write your own function to tidy the many correlations into a ggplot2-friendly form for plotting."
},
{
"code": null,
"e": 549,
"s": 367,
"text": "By the end, you will be able to run one function to get a tidied data frame of correlations. You can then run ggplot2 code on this data frame to make your own correlation heat maps."
},
{
"code": null,
"e": 663,
"s": 549,
"text": "If you just want the code, you can skip to the end. You can also read my other blog posts on my website, KHstats."
},
{
"code": null,
"e": 806,
"s": 663,
"text": "I really appreciate some of the packages and functions that allow me to make correlation plots super quickly using R. Here are a few examples:"
},
{
"code": null,
"e": 838,
"s": 806,
"text": "corrplot::corrplot(cor(mtcars))"
},
{
"code": null,
"e": 865,
"s": 838,
"text": "corrgram::corrgram(mtcars)"
},
{
"code": null,
"e": 901,
"s": 865,
"text": "ggcorrplot::ggcorrplot(cor(mtcars))"
},
{
"code": null,
"e": 1136,
"s": 901,
"text": "All of these are nice, but none of them is ultimately as customizable as I need them to be. Iβll next show how you can bypass using someone elseβs function constraints to prepare correlations in your data in a ggplot2-friendly format."
},
{
"code": null,
"e": 1321,
"s": 1136,
"text": "We could use the base R function cor() to get our correlations, but I do not like the defaults for missing data. Instead, I use Frank Harrell's Hmisc::rcorr() function for two reasons:"
},
{
"code": null,
"e": 1470,
"s": 1321,
"text": "it drops missing pairs as the defaultit returns p-values, so you only need one function to get both the correlation coefficient and matching p-value"
},
{
"code": null,
"e": 1508,
"s": 1470,
"text": "it drops missing pairs as the default"
},
{
"code": null,
"e": 1620,
"s": 1508,
"text": "it returns p-values, so you only need one function to get both the correlation coefficient and matching p-value"
},
{
"code": null,
"e": 1794,
"s": 1620,
"text": "Letβs load the libraries weβll need for this, which are knitr for showing tables using kable, and tidyverse (we'll specifically use tidyr, dplyr, ggplot2, tibble and purrr)."
},
{
"code": null,
"e": 1845,
"s": 1794,
"text": "library(knitr) library(tidyverse, warn.conflict=F)"
},
{
"code": null,
"e": 2009,
"s": 1845,
"text": "First, letβs look at our output from our correlation function weβll use, Hmisc::rcorr(). It requires the input to be a matrix and outputs a list of three matrices."
},
{
"code": null,
"e": 2055,
"s": 2009,
"text": "mtcars_cor <- Hmisc::rcorr(as.matrix(mtcars))"
},
{
"code": null,
"e": 2307,
"s": 2055,
"text": "These three matrices include the correlation coefficient (default is Pearsonβs), r, the p-value, P, and the number of observations used for each correlation, n. Let's turn each matrix into a data frame and look at the top six rows with head and kable."
},
{
"code": null,
"e": 2340,
"s": 2307,
"text": "The correlation coefficients, r:"
},
{
"code": null,
"e": 2388,
"s": 2340,
"text": "data.frame(mtcars_cor$r) %>% head() %>% kable()"
},
{
"code": null,
"e": 2405,
"s": 2388,
"text": "The p-values, P:"
},
{
"code": null,
"e": 2453,
"s": 2405,
"text": "data.frame(mtcars_cor$P) %>% head() %>% kable()"
},
{
"code": null,
"e": 2582,
"s": 2453,
"text": "The number of observations, n. There are no missing data in the mtcars data set so there are 32 pairs used for all correlations."
},
{
"code": null,
"e": 2633,
"s": 2582,
"text": "data.frame(mtcars_cor$n) %>% head(n=3) %>% kable()"
},
{
"code": null,
"e": 2786,
"s": 2633,
"text": "Next, we can write a function that formats a data frame correctly for Hmisc::rcorr() and then turns each of the three elements of the list ( r, n and P)"
},
{
"code": null,
"e": 2996,
"s": 2786,
"text": "cors <- function(df) { # turn all three matrices (r, n, and P into a data frame) M <- Hmisc::rcorr(as.matrix(df)) # return the three data frames in a list return(Mdf) Mdf <- map(M, ~data.frame(.x)) }"
},
{
"code": null,
"e": 3206,
"s": 2996,
"text": "Nothing too crazy happened in this function. Now we just have a list of three data frames. We can look at the first element of our list using first(), which shows us the correlations between all our variables:"
},
{
"code": null,
"e": 3254,
"s": 3206,
"text": "cors(mtcars) %>% first() %>% head() %>% kable()"
},
{
"code": null,
"e": 3398,
"s": 3254,
"text": "The next step is to get the data ready for plotting with ggplot2. We can keep the data in a list for now and use the map() function from purrr."
},
{
"code": null,
"e": 3521,
"s": 3398,
"text": "First, we need to move the rownames to their own column using tibble::rownames_to_column(). The output of that looks like:"
},
{
"code": null,
"e": 3673,
"s": 3521,
"text": "cors(mtcars) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% # look at the first element of the list (r) first() %>% head() %>% kable()"
},
{
"code": null,
"e": 3774,
"s": 3673,
"text": "Next, we can turn move of the columns to a single column called measure2 using tidyr::pivot_longer()"
},
{
"code": null,
"e": 4001,
"s": 3774,
"text": "cors(mtcars) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, \"measure2\")) %>% # look at the first element of the list (r) first() %>% head() %>% kable()"
},
{
"code": null,
"e": 4161,
"s": 4001,
"text": "Now, weβre ready to unlist our data by using bind_rows(). This will turn our correlations into a very long data frame with all the rows from r, then n, then P."
},
{
"code": null,
"e": 4411,
"s": 4161,
"text": "cors(mtcars) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, \"measure2\")) %>% # merge our three list elements by binding the rows bind_rows(.id = \"id\") %>% head() %>% kable()"
},
{
"code": null,
"e": 4514,
"s": 4411,
"text": "For ggplot2, we'll need to have r, n, and P as their own column. We can use pivot_longer() to do this."
},
{
"code": null,
"e": 4818,
"s": 4514,
"text": "cors(mtcars) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, \"measure2\")) %>% # merge our three list elements by binding the rows bind_rows(.id = \"id\") %>% pivot_wider(names_from = id, values_from = value) %>% head() %>% kable()"
},
{
"code": null,
"e": 5140,
"s": 4818,
"text": "Finally, we can add a few columns that will potentially be useful later for making our correlation plots more informative. Letβs add columns that tell us whether the p-value was less than 0.05, and if so, give us back 1) the p-value and 2) the correlation coefficient, in case we want to label our plot with these values."
},
{
"code": null,
"e": 5555,
"s": 5140,
"text": "cors(mtcars) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% # format each data set (r,P,n) long map(~pivot_longer(.x, -measure1, \"measure2\")) %>% # merge our three list elements by binding the rows bind_rows(.id = \"id\") %>% pivot_wider(names_from = id, values_from = value) %>% mutate(sig_p = ifelse(P < .05, T, F), p_if_sig = ifelse(P <.05, P, NA), r_if_sig = ifelse(r <.05, r, NA)) %>% head() %>% kable()"
},
{
"code": null,
"e": 5711,
"s": 5555,
"text": "This seems like everything I think Iβll ever want to plot. Of course, you could add more. At this point I turned my formatted correlations into a function:"
},
{
"code": null,
"e": 6044,
"s": 5711,
"text": "formatted_cors <- function(df){ cors(df) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% map(~pivot_longer(.x, -measure1, \"measure2\")) %>% bind_rows(.id = \"id\") %>% pivot_wider(names_from = id, values_from = value) %>% mutate(sig_p = ifelse(P < .05, T, F), p_if_sig = ifelse(P <.05, P, NA), r_if_sig = ifelse(P <.05, r, NA)) }"
},
{
"code": null,
"e": 6088,
"s": 6044,
"text": "We can test the function works as expected:"
},
{
"code": null,
"e": 6134,
"s": 6088,
"text": "formatted_cors(mtcars) %>% head() %>% kable()"
},
{
"code": null,
"e": 6200,
"s": 6134,
"text": "Weβre finally ready to plot our correlation heat maps in ggplot2."
},
{
"code": null,
"e": 6405,
"s": 6200,
"text": "The simplest form of this plot only requires us to specify measure1 and measure2 on the x and y-axis, respectively. Then we can map the correlation r to the fill aesthetic, and add a tile as the geometry."
},
{
"code": null,
"e": 6496,
"s": 6405,
"text": "formatted_cors(mtcars) %>% ggplot(aes(x = measure1, y = measure2, fill = r)) + geom_tile()"
},
{
"code": null,
"e": 6596,
"s": 6496,
"text": "We can make some minor aesthetic changes, such as the fill coloring scale, titles, and font family."
},
{
"code": null,
"e": 7143,
"s": 6596,
"text": "formatted_cors(mtcars) %>% ggplot(aes(x = measure1, y = measure2, fill = r)) + geom_tile() + labs(x = NULL, y = NULL, fill = \"Pearson's\\nCorrelation\", title=\"Correlations in Mtcars\") + # map a red, white and blue color scale to correspond to -1:1 sequential gradient scale_fill_gradient2(mid=\"#FBFEF9\",low=\"#0C6291\",high=\"#A63446\", limits=c(-1,1)) + theme_classic() + # remove excess space on x and y axes scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + # change global font to roboto theme(text=element_text(family=\"Roboto\"))"
},
{
"code": null,
"e": 7346,
"s": 7143,
"text": "We can add the correlations for extra information. For this particular plot, I only added significant (p-value less than 0.05) correlations, using the column r_if_sig that outputs from formatted_cors()."
},
{
"code": null,
"e": 7842,
"s": 7346,
"text": "formatted_cors(mtcars) %>% ggplot(aes(measure1, measure2, fill=r, label=round(r_if_sig,2))) + geom_tile() + labs(x = NULL, y = NULL, fill = \"Pearson's\\nCorrelation\", title=\"Correlations in Mtcars\", subtitle=\"Only significant Pearson's correlation coefficients shown\") + scale_fill_gradient2(mid=\"#FBFEF9\",low=\"#0C6291\",high=\"#A63446\", limits=c(-1,1)) + geom_text() + theme_classic() + scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + theme(text=element_text(family=\"Roboto\"))"
},
{
"code": null,
"e": 8146,
"s": 7842,
"text": "Another version of this could involve squares with different sizes to denote the strength of correlation using geom_point with shape set to a value from these available geom_shapes. Make sure you take the absolute value of the correlation so that strong negative correlations can also be denoted larger."
},
{
"code": null,
"e": 8665,
"s": 8146,
"text": "formatted_cors(mtcars) %>% ggplot(aes(measure1, measure2, col=r)) + ## to get the rect filled geom_tile(col=\"black\", fill=\"white\") + geom_point(aes(size = abs(r)), shape=15) + labs(x = NULL, y = NULL, col = \"Pearson's\\nCorrelation\", title=\"Correlations in Mtcars\") + theme_classic() + scale_color_gradient2(mid=\"#FBFEF9\",low=\"#0C6291\",high=\"#A63446\", limits=c(-1,1)) + scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + theme(text=element_text(family=\"Roboto\")) + scale_size(range=c(1,11), guide=NULL)"
},
{
"code": null,
"e": 9597,
"s": 8665,
"text": "cors <- function(df) { M <- Hmisc::rcorr(as.matrix(df)) Mdf <- map(M, ~data.frame(.x)) return(Mdf) }formatted_cors <- function(df){ cors(df) %>% map(~rownames_to_column(.x, var=\"measure1\")) %>% map(~pivot_longer(.x, -measure1, \"measure2\")) %>% bind_rows(.id = \"id\") %>% pivot_wider(names_from = id, values_from = value) %>% mutate(sig_p = ifelse(P < .05, T, F), p_if_sig = ifelse(P <.05, P, NA), r_if_sig = ifelse(P <.05, r, NA)) }formatted_cors(mtcars) %>% ggplot(aes(measure1, measure2, fill=r, label=round(r_if_sig,2))) + geom_tile() + labs(x = NULL, y = NULL, fill = \"Pearson's\\nCorrelation\", title=\"Correlations in Mtcars\", subtitle=\"Only significant Pearson's correlation coefficients shown\") + scale_fill_gradient2(mid=\"#FBFEF9\",low=\"#0C6291\",high=\"#A63446\", limits=c(-1,1)) + geom_text() + theme_classic() + scale_x_discrete(expand=c(0,0)) + scale_y_discrete(expand=c(0,0)) + theme(text=element_text(family=\"Roboto\"))"
}
] |
Training multiple machine learning models and running data tasks in parallel via YARN + Spark + multithreading | by Edson Hiroshi Aoki | Towards Data Science
|
image: Freepik.com
To objective of this article is to show how a single data scientist can launch dozens or hundreds of data science-related tasks simultaneously (including machine learning model training) without using complex deployment frameworks. In fact, the tasks can be launched from a βdata scientistβ-friendly interface, namely, a single Python script which can be run from an interactive shell such as Jupyter, Spyder or Cloudera Workbench. The tasks can be themselves parallelised in order to handle large amounts of data, such that we effectively add a second layer of parallelism.
Data scientists who wish to do more work with less time, by making use of large scale computational resources (e.g. clusters or public clouds), possibly shared with other users via YARN. To understand this article you need a good knowledge of Python, working knowledge of Spark, and at least basic understanding about Hadoop YARN architecture and shell scripting;
Machine learning engineers who are supporting data scientists on making use of available computational capacity and operating large scale data
βData scienceβ and βautomationβ are two words that invariably go hand-in-hand with each other, as one of the keys goals of machine learning is to allow machines to perform tasks more quickly, with lower cost, and/or better quality than humans.
Naturally, it wouldnβt make sense for an organization to spend more on tech staff that are supposed to develop and maintain systems that automate work (data scientists, data engineers, DevOps engineers, software engineers and others) than on the staff that do the work manually. Itβs not thus surprising that a recurrent discussion is how much we can automate the work of data science teams themselves, for instance via automated machine learning.
To achieve cost-effective data science automation, it is imperative to able to harness computational power from public or private clouds; after all, the cost of hardware is quite low compared to the cost of highly skilled technical staff. While technology to achieve so is certainly available, many organisations ended up facing the βbig data software engineer vs data scientist conundrumβ, or more precisely, the drastic discrepancy between
βBig data software engineer skillsβ, i.e. skills necessary to manipulate massive amounts of data in complex computational environments, and run these processes in a reliable manner along with other concurrent processes
βData scientist skillsβ, i.e. skills necessary to apply algorithms and mathematics to the data to extract insights valuable from a business standpoint
image: Freepik.com
Some organisations would make βdata scientistsβ responsible for developing the analytics models in some sort of βcontrolled analytics environmentβ where one does not need to think too much about the underlying computational resources or sharing the resources with other processes, and βbig data software engineersβ responsible for coding βproduction-readyβ versions of the models developed by data scientists and deploy them into production. This setup resulted in obvious inefficiencies, such as:
Data scientists developing sub-optimal models due to not making use of large scale data and computational resources. In some organisations, data scientists even ended up working with single-node frameworks such as Pandas/Scikit-Learn and basing their models entirely on small datasets obtained via sampling or over-engineered features;Developed models performing well on analytics environment but not performing well, or being completely unable to run, in production environment;The difficulty to evaluate generation of business value, identify and fix problems, as well as making iterative improvements, as data scientists end up dramatically losing oversight of the analytics process once models are sent into production.
Data scientists developing sub-optimal models due to not making use of large scale data and computational resources. In some organisations, data scientists even ended up working with single-node frameworks such as Pandas/Scikit-Learn and basing their models entirely on small datasets obtained via sampling or over-engineered features;
Developed models performing well on analytics environment but not performing well, or being completely unable to run, in production environment;
The difficulty to evaluate generation of business value, identify and fix problems, as well as making iterative improvements, as data scientists end up dramatically losing oversight of the analytics process once models are sent into production.
Different organisations dealt with this situation with different ways, either by forcing big data software engineers and data scientists learn the skills of the βother roleβ, or by creating a βthird roleβ, named βMachine Learning Engineerβ to bridge the gap between the two roles.
But the fact is that nowadays, there are far more resources in terms of allowing data scientists without exceptional software engineering skills to work in βrealisticβ environments, i.e. similar to production, in terms of computational complexity. Machine learning libraries such as Spark MLLib, Kubeflow, Tensorflow-GPU, and MMLSpark allow data preparation and model training to be distributed across multiple CPUs, GPUs, or a combination of both; at the same time, frameworks such as Apache Hadoop YARN and Kubernetes allow data scientists to work simultaneously using the same computational resources, by understanding only basic concepts about the underlying server infrastructure, such as number of available CPUs/GPUs and available memory.
The intent of this article is to provide an example of how these libraries and frameworks, as well as massive (but shared) computational resources, can be leveraged together in order to automate the creation and testing of data science models.
Frameworks like Spark and Kubeflow make easy to distribute a Big Data task, such as feature processing or machine learning model training, across GPUs and/or hundreds of CPUs without a detailed understanding of the server architecture. On the other hand, executing tasks in parallel, rather than individual parallelised tasks, is not as seamless. Of course, itβs not hard for a data scientist to work with two or three PySpark sessions in Jupyter at the same time, but for the sake of automation, we might be rather interested in running dozens and hundreds of tasks simultaneously, all specified in a programmatic way with minimal human interference.
Naturally, one may ask why bother with running tasks in parallel, instead of simply increasing the number of cores per task and make each task run in a shorter time. There are two reasons:
The processing speed often does not scale with the number of cores. For example, in the case of training machine learning models, if the data is not large enough, there might be zero improvement on computation time by increasing the number of cores from say, 10 to 100, and sometimes the computational time might even increase due to process and communication overhead, as well as the inability to leverage highly efficient single-processor implementations available in some machine learning librariesThe accuracy of machine learning algorithms models may also decrease due to parallelisation, as those algorithms often rely on suboptimal heuristics to able to run in distributed fashion, such as data split and voting
The processing speed often does not scale with the number of cores. For example, in the case of training machine learning models, if the data is not large enough, there might be zero improvement on computation time by increasing the number of cores from say, 10 to 100, and sometimes the computational time might even increase due to process and communication overhead, as well as the inability to leverage highly efficient single-processor implementations available in some machine learning libraries
The accuracy of machine learning models may also decrease due to parallelisation, as those algorithms often rely on suboptimal heuristics to be able to run in a distributed fashion, such as data splitting and voting
It is certainly possible, using deployment tools such as Airflow, to run arbitrarily complex, dynamically defined and highly automated data analytics pipelines involving parallelised tasks. However, these tools require low-level scripting and configuration and arenβt suited for quick βtrial and errorβ experiments carried on by data scientists on a daily basis, often accustomed to try and re-try ideas quickly in interactive shells such as Jupyter or Spyder. Also, taking us back to the previously mentioned βbig data software engineer vs data scientistβ conundrum, organisations might prefer data scientists to spend their time focusing on experimenting with the data and generating business value, not on getting immersed in low-level implementation or deployment.
In this article, I will show how we can make use of Apache Hadoop YARN to launch and monitor multiple jobs in a Hadoop cluster simultaneously, (including individually parallelised Spark jobs), directly from any Python code (including code from interactive Python shells such as Jupyter), via Python multithreading. While the example will consist of training multiple machine learning models in parallel, I will provide a generic framework that can be used to launch arbitrary data tasks such as feature engineering and model metric computation.
Some applications for multiple model parallel training are:
Hyper-parameter tuning: For the same training data set, simultaneously train using different model types (say Logistic Regression, Gradient Boosting and Multi-layer Perceptron) and also different hyperparameter configurations, in order to find the optimal model type/hyperparameter set as quickly as possible;
Multi-label classification: Train multiple binary/multi-class classification models in parallel, where each model training task will use a different column as the label column, such that the resulting combination of models will effectively be a multi-label classifier;
Feature reduction: For a poll of previously ranked features, train multiple models, each using only the top N-ranked features as feature columns, with N being varied across the training tasks.
In our framework, I will call the main task, i.e. the Python code that creates the additional tasks to run in parallel, as the controller task, and the tasks being started by the controller task as the subordinate tasks. (I intentionally avoid using the expression βworkerβ to avoid confusion, as in Spark, βworkerβ is a synonym for Spark executor)
The controller task is responsible for:
Defining how many subordinate tasks should be run at the same time and what to do in case one of the tasks fail;
Creating the subordinate tasks, passing the inputs to each task and getting their outputs, if any;
Generating the inputs and processing the outputs of the subordinate tasks.
An interesting aspect of YARN is that it allows Spark to be used both in the controller and subordinate tasks. Although neither is necessary, this allows us to handle arbitrarily large datasets without needing to worry ourselves with data engineering, as long as we have enough computational resources. Namely, the controller task can run Spark in client mode, and the subordinate tasks can run Spark in cluster mode:
In client mode, the Spark driver runs in the environment where the controllerβs Python code is being run (that we refer to as client environment), allowing the use of locally installed interactive shells such as Jupyter, whereas the Spark executors run in the YARN-managed Hadoop cluster, with the interactions between the driver and executors made via a third type of process named Application Master also running in the Hadoop cluster;
In cluster mode, both the driver and the executors run in the YARN-managed Hadoop cluster. Note that nothing prevent us to have the controller task also running in cluster mode, but interactive shells cannot be used in this way.
The framework is illustrated in the figure below:
There are two things to note about the example above:
Although in the example the controller task is also the driver of the Spark process (and thus associated with executors in the Hadoop cluster via the YARN Application Master), this is not necessary, although useful for example if we want to do some preprocessing on the data before deploying to the subordinate tasks;
Although the subordinate tasks do not need to use Spark parallelisation, we will use the spark-submit command to launch them, such that they will always have a Spark driver, although not necessarily Spark executors. This is the case of process 3 above.
Before I delve into parallelisation, I will first explain how to execute a subordinate task from a controller task written in Python. As mentioned before, we will do so using the spark-submit shell script contained in the Apache Spark installation, such that the subordinate task will be technically a Spark job, although it does not necessarily has executors or Spark code as I mentioned before.
In principle, we can use spark-submit from Python by simply calling the os.system function, which allows us to execute a shell command from Python. In practice, we need to be able to debug and monitor the task; for that purpose, it is better to use the excellent subprocess library. An example:
import json
import subprocess

# Directory holding the cluster-mode Spark configuration; used below to
# override SPARK_CONF_DIR so spark-submit deploys the job in cluster mode.
spark_config_cluster_path = "/home/edsonaoki/spark_config_cluster"

# Application name. It must be unique in the Hadoop cluster so that the
# YARN application ID can later be recovered from it.
app_name = "some_model_training"

spark_config = {
    "spark.jars.packages": "com.microsoft.ml.spark:mmlspark_2.11:0.18.1",
    "spark.dynamicAllocation.enabled": "false",
    "spark.executor.instances": "10",
    # Ship local Python packages to the cluster: the subordinate task runs
    # in the Hadoop cluster and has no access to the client file system.
    "spark.yarn.dist.files": "/home/edsonaoki/custom_packages.tar",
}

# Subordinate task's script and its arguments. The paths are HDFS paths,
# again because the script runs inside the Hadoop cluster.
command = "lightgbm_training.py " \
    "hdfs://user/edsonaoki/datasets/input_data.parquet " \
    "hdfs://user/edsonaoki/models"

# BUG FIX: the original text had a mangled opening quote and single-dash
# "-name"/"-conf" flags; spark-submit expects "--name" and "--conf".
spark_submit_cmd = 'SPARK_CONF_DIR=%s spark-submit --name %s %s %s' % (
    spark_config_cluster_path,
    app_name,
    " ".join(['--conf %s="%s"' % (key, value)
              for key, value in spark_config.items()]),
    command)

# Launch through a shell (shell=True is needed for the env-var prefix),
# piping STDOUT+STDERR into a FIFO queue so we can echo runtime messages.
cmd_output = subprocess.Popen(spark_submit_cmd, shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              bufsize=1, universal_newlines=True)
for line in cmd_output.stdout:
    print(line)
# Wait for the process to terminate once the queue has been drained.
cmd_output.communicate()
At the beginning of the code I set the path containing the cluster-mode base Spark configuration, which is later used to change the SPARK_CONF_DIR environment variable. This is actually a crucial step if the controller task is configured to run Spark in client mode, since the Spark configuration for cluster mode is typically different than for client mode.
If you donβt know much about how to configure Spark in cluster mode, you can start by making a copy of the existing SPARK_CONF_DIR. Inside the spark-defaults.conf file we need to have
spark.submit.deployMode=cluster
instead of
spark.submit.deployMode=client
and certain configuration options, such as spark.yarn.rmProxy.enabled and the spark.driver.options.* options need to be disabled as there is no network-specific configuration for the driver when running Spark in cluster mode. Check the Spark on YARN documentation if you are in doubt. Of course, if the controller task is also running Spark in cluster mode, there is no need to have a separate configuration.
Now, looking at the subsequent steps:
# Application name: must be unique in the Hadoop cluster so we can later
# recover the YARN application ID from it.
app_name = "some_model_training"
spark_config = {
    "spark.jars.packages": "com.microsoft.ml.spark:mmlspark_2.11:0.18.1",
    "spark.dynamicAllocation.enabled": "false",
    "spark.executor.instances": "10",
    # Custom Python package shipped to the cluster, since the subordinate
    # task cannot see the client environment's file system.
    "spark.yarn.dist.files": "/home/edsonaoki/custom_packages.tar",
}
# BUG FIX: the original concatenation was missing the space between the
# two HDFS arguments ("input_data.parquet" ran into "hdfs://...").
command = "lightgbm_training.py " \
    "hdfs://user/edsonaoki/datasets/input_data.parquet " \
    "hdfs://user/edsonaoki/models"
# BUG FIX: restored the mangled opening quote and the double-dash
# "--name"/"--conf" flags expected by spark-submit.
spark_submit_cmd = 'SPARK_CONF_DIR=%s spark-submit --name %s %s %s' % (
    spark_config_cluster_path,
    app_name,
    " ".join(['--conf %s="%s"' % (key, value)
              for key, value in spark_config.items()]),
    command)
Here I set up the application name, additional Spark configuration options and the command to be executed by the spark-submit script. These are straightforward to understand, but the application name is particularly important in our case β we will later understand why. We also submit a custom Python package via the spark.yarn.dist.files configuration parameter, which as I will show later, is especially handy since the subordinate task runs in the Hadoop cluster and hence has no access to the Python functions available in the local (client) environment.
Note also that I specify two HDFS paths as arguments to the lightgbm_training.py Python script (the subordinate taskβs code), for a similar reason to above: since the Python script will run in the Hadoop cluster, it will not have access to any files in the client environmentβs file system, and hence any files to be exchanged between controller or subordinate task must be either explicitly submitted via spark.yarn.dist.files or put into a shared file system such as HDFS or AWS S3.
After preparing the spark-submit shell command line, we are ready to execute it using the subprocess.Popen command:
# Execute spark-submit through a shell; STDOUT and STDERR are merged and
# directed to the subprocess.PIPE FIFO queue so the controller can echo
# the YARN Application Master's messages while the job runs.
cmd_output = subprocess.Popen(spark_submit_cmd, shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              bufsize=1, universal_newlines=True)
We set shell=True to make Python initiate a separate shell process to execute the command, rather than attempting to initiate spark-submit directly from the Python process. Although setting shell=False is generally preferable when using the subprocess library, doing so restricts the command line format and itβs not feasible in our case.
The stdout, stderr, bufsize and universal_newlines arguments are used to handle the output (STDOUT) and error messages (STDERR) issued by the shell command during execution time. When we are executing multiple subordinate tasks in parallel, we will probably want to ignore all execution time messages as they will be highly cluttered and impossible to interpret anyways. This is also useful to save memory for reasons we will explain later. However, before attempting to run multiple tasks in parallel, it is certainly best to first make sure that each individual task will work properly, by running a single subordinate task with output/error messages enabled.
In the example I set stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1 and universal_newlines=True, which basically, will direct all shell command output to a First In First Out (FIFO) queue named subprocess.PIPE.
Note that when running a Spark job in cluster mode, subprocess.PIPE will only have access to messages from the YARN Application Master, not the driver or executors. To check the driver and executor messages, you might look at the Hadoop cluster UI via your browser, or retrieve the driver and executor logs post-execution as I will show later. Additionally, if file logging is enabled in the log4j.properties file (located in the Spark configuration), the messages from the Application Master will be logged into a file rather than directed to subprocess.PIPE, so disable file logging if needed.
Finally, to display the output/error messages in the Python scriptβs output, I continue the code above as follows:
# Drain the FIFO queue, echoing each Application Master message as it
# arrives; reading eagerly keeps the queue small and saves memory.
for line in cmd_output.stdout:
    print(line)
# Block until the subordinate process terminates (queue already empty).
cmd_output.communicate()
The purpose of cmd_output.communicate() is to wait for the process to finish after subprocess.PIPE is empty, i.e. no more outputs from the subordinate task are written to it. It is highly advisable to read the entire queue before calling the cmd_output.communicate() method as done above, to prevent the queue from increasing in size and wasting memory.
As I mentioned earlier, when we run tasks in parallel we do not want debug messages to be displayed; moreover, if a large number of tasks are sending messages to an in-memory FIFO queue at the same time, memory usage will increase if messages aren't being read from the queue as fast as they are generated. A version of the code from the previous section without debugging, starting with the call to spark-submit, is as follows:
import time  # BUG FIX: `time` is used below but was never imported

# Launch the subordinate task with all output discarded (no debugging).
cmd_output = subprocess.Popen(spark_submit_cmd, shell=True,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)


def getYARNApplicationID(app_name):
    """Return the YARN application ID of the application whose name
    contains `app_name`, or '' if none is found.

    Parses `yarn application -list`; the parsing may need adjusting
    depending on the Hadoop version, and `app_name` must be unique in
    the cluster for the lookup to be reliable.
    """
    state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED'
    out = subprocess.check_output(["yarn", "application", "-list",
                                   "-appStates", state],
                                  stderr=subprocess.DEVNULL,
                                  universal_newlines=True)
    lines = [x for x in out.split("\n")]
    application_id = ''
    for line in lines:
        if app_name in line:
            # The application ID is the first tab-separated field.
            application_id = line.split('\t')[0]
            break
    return application_id


max_wait_time_job_start_s = 120
start_time = time.time()
# BUG FIX: yarn_application_id was read before assignment in the
# original, raising NameError on the first loop test.
yarn_application_id = ''
# Poll YARN until the job is registered or the timeout expires.
while yarn_application_id == '' and time.time() - start_time \
        < max_wait_time_job_start_s:
    yarn_application_id = getYARNApplicationID(app_name)
cmd_output.wait()
if yarn_application_id == '':
    raise RuntimeError("Couldn't get yarn application ID for application %s"
                       % app_name)
The code starts by launching the subordinate task as before, but with debugging disabled:
# Launch the subordinate task with both STDOUT and STDERR discarded:
# when many tasks run in parallel their interleaved messages would be
# unreadable, and an unread in-memory queue would waste memory.
cmd_output = subprocess.Popen(spark_submit_cmd, shell=True,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
Since there are no debug messages to be displayed when the process is running, we use cmd_output.wait instead of cmd_output.communicate() to wait for the task to finish. Note that although we wonβt see the Application Masterβs messages, we can still debug the Spark jobβs driver and executor in runtime via the Hadoop cluster UI.
However, we still need to be able to monitor the task from a programmatic point of view; more specifically, the controller task needs to know when the subordinate task has finished, whether it was successful, and take appropriate action in case of failure. For that purpose, we can use the application name that we set in the beginning:
# The application name is the key used to recover the YARN application
# ID, so it must be unique within the Hadoop cluster.
app_name = "some_model_training"
The application name can be used by YARN to retrieve the YARN application ID, which allows us to retrieve the status and other information about the subordinate task. Again, we can resort to the subprocess library to define a function that can retrieve the application ID from the application name:
def getYARNApplicationID(app_name):
    """Look up the YARN application ID for the application whose name
    contains `app_name`.

    Runs `yarn application -list` over all relevant states and returns
    the first tab-separated field of the first matching row, or '' when
    no row matches. Requires `app_name` to be unique in the cluster;
    parsing may need adjustment for different Hadoop versions.
    """
    state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED'
    out = subprocess.check_output(
        ["yarn", "application", "-list", "-appStates", state],
        stderr=subprocess.DEVNULL,
        universal_newlines=True)
    # First matching row wins; the ID is the first tab-separated field.
    candidates = (row.split('\t')[0]
                  for row in out.split("\n") if app_name in row)
    return next(candidates, '')
Observe that getYARNApplicationID parses the output of the yarn application -list shell command. Depending on your Hadoop version the output format may be slightly different and the parsing needs to be adjusted accordingly. If in doubt, you can test the format by running the following command in the terminal:
$ yarn application -list -appStates RUNNING,ACCEPTED,FINISHED,KILLED,FAILED
The tricky aspect is that this method can only work if the application name is unique in the Hadoop cluster. Therefore, you need to make sure you are creating a unique application name, for instance by including timestamps, random strings, your user ID, etc. Optionally, you can also add other filters when attempting to parse the output of yarn application -list, for example, the user ID, the YARN queue name or the time of the day.
Since the Spark job takes some time to be registered in YARN after it has been launched using spark-submit, I implemented the loop:
max_wait_time_job_start_s = 120
start_time = time.time()
# BUG FIX: yarn_application_id must be initialized before the loop
# condition first reads it (the original raised NameError here).
yarn_application_id = ''
# Poll YARN until the freshly submitted job appears, or give up after
# max_wait_time_job_start_s seconds.
while yarn_application_id == '' and time.time() - start_time \
        < max_wait_time_job_start_s:
    yarn_application_id = getYARNApplicationID(app_name)
where max_wait_time_job_start_s is the time to wait for the registration in seconds, which may need to be adjusted according to your environment.
The meaning of
# An empty ID means the Spark job was never registered with YARN, i.e.
# the launch failed (or the registration timeout is too short).
if yarn_application_id == '':
    raise RuntimeError("Couldn't get yarn application ID for"\
        " application %s" % app_name)
is straightforward; if there is no application ID, it means the Spark job has not been successfully launched and we need to throw an exception. This may also indicate that we need to increase max_wait_time_job_start_s, or change how the output of yarn application -list is parsed inside getYARNApplicationID.
After the subordinate task has finished, checking its final status can be done as follows:
def getSparkJobFinalStatus(application_id):
    """Return the final status (e.g. 'SUCCEEDED') that YARN reports for
    `application_id`, or '' if the status line is not found.

    Parses the output of `yarn application -status`; the expected row
    looks like "\\tFinal-State : SUCCEEDED". Parsing may need tuning
    for other Hadoop versions.
    """
    out = subprocess.check_output(
        ["yarn", "application", "-status", application_id],
        stderr=subprocess.DEVNULL,
        universal_newlines=True)
    state = ''
    for row in out.split("\n"):
        # The row carries a leading tab, hence the [1:15] slice.
        if len(row) > 15 and row[1:15] == "Final-State : ":
            state = row[15:]
            break
    return state


final_status = getSparkJobFinalStatus(yarn_application_id)
where again, you may need to tune the parsing of yarn application -status depending on your Hadoop version. How to handle the final status is entirely up to you, but one possibility is to store the Spark jobβs driver and executor log in a file and raise an exception. For example:
log_path = "/home/edsonaoki/logs/%s_%s.log" % (app_name,
                                               yarn_application_id)
if final_status != "SUCCEEDED":
    # Fetch the Spark driver/executor logs from YARN and save them for
    # post-mortem debugging before failing the controller-side task.
    # BUG FIX: the keyword was misspelled "universal_lines" in the
    # original, which raises TypeError in subprocess.Popen.
    cmd_output = subprocess.Popen(["yarn", "logs",
                                   "-applicationId", yarn_application_id],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  bufsize=1,
                                  universal_newlines=True)
    with open(log_path, "w") as f:
        for line in cmd_output.stdout:
            f.write(line)
    print("Written log of failed task to %s" % log_path)
    cmd_output.communicate()
    raise RuntimeError("Task %s has not succeeded" % app_name)
In case it is not obvious: before attempting to execute subordinate tasks in parallel, make sure to test as many tasks as possible without parallelisation, as debugging parallel tasks can be incredibly difficult.
To perform parallelisation we will use Pythonβs concurrent library. The concurrent library uses multithreading and not multiprocessing; i.e. the threads do run in the same processor, such that from the side of the controller task, there is no real parallel processing. However, since the threads started in the controller task are in I/O mode (unblocked) when waiting for the subordinate tasks to finish, multiple subordinate tasks can be launched asynchronously, such that they will actually run in parallel in the side of the Hadoop cluster. While we can technically use the multiprocessing library instead of the concurrent library to achieve parallelism also from the controller taskβs side, I would advise against it as it will substantially increase the memory consumption in the client environment for little benefit β the idea is that the βtough processingβ is done in the Hadoop cluster.
When we launch a Spark job, we are typically aware of the constraints of processing and memory in the cluster environment, especially in the case of a shared environment, and use configuration parameters such as spark.executor.memory and spark.executor.instances in order to control the taskβs processing and memory consumption. The same needs to be done in our case; we need to limit the number of subordinate tasks that execute simultaneously according to the availability of computational resources in the cluster, such that when we reach this limit, a subordinate task can only be started after another has finished.
The concurrent package offers the futures.ThreadPoolExecutor class which allows us to start multiple threads and wait for them to finish. The class also allows us to limit the number of threads doing active processing (i.e. not blocked by I/O) via the max_workers argument. However, as I mentioned before, a thread in the controller task is treated as being blocked by I/O when the subordinate task is running, which means that max_workers won't effectively limit the number of threads. As a result, all subordinate tasks will be submitted nearly simultaneously and the Hadoop cluster can become overloaded.
This can be solved rather easily by modifying the futures.ThreadPoolExecutor class as follows:
import concurrent.futures
from queue import Queue


class ThreadPoolExecutorWithQueueSizeLimit(
        concurrent.futures.ThreadPoolExecutor):
    """A ThreadPoolExecutor whose internal work queue is bounded.

    With a bounded queue, submit() blocks once `maxsize` items are
    pending, which effectively caps the number of subordinate tasks
    running simultaneously in the Hadoop cluster.
    """

    def __init__(self, maxsize, *args, **kwargs):
        # Zero-argument super() (Python 3 form) forwards the remaining
        # constructor arguments unchanged to ThreadPoolExecutor.
        super().__init__(*args, **kwargs)
        # Swap the executor's unbounded queue for a bounded one.
        self._work_queue = Queue(maxsize=maxsize)
This new class ThreadPoolExecutorWithQueueSizeLimit works exactly like futures.ThreadPoolExecutor, but it wonβt allow more than maxsize threads to exist at any point of time, effectively limiting the number of subordinate tasks running simultaneously in the Hadoop cluster.
We now need to define a function, containing the execution code of the thread, which can be passed as an argument to the class ThreadPoolExecutorWithQueueSizeLimit. Based on the previous code for executing a subordinate task from Python without debugging messages, I present the following generic thread execution function:
def executeThread(app_name, spark_submit_cmd, error_log_dir,
                  max_wait_time_job_start_s=120):
    """Launch one subordinate task and block until it finishes.

    Parameters:
        app_name: unique application name (used to find the YARN ID).
        spark_submit_cmd: full spark-submit shell command line.
        error_log_dir: directory where failed-task logs are written.
        max_wait_time_job_start_s: seconds to wait for YARN registration.

    Returns True on success; raises RuntimeError when the job cannot be
    found in YARN or its final status is not SUCCEEDED.
    """
    cmd_output = subprocess.Popen(spark_submit_cmd, shell=True,
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
    start_time = time.time()
    # BUG FIX: initialize before the loop (original read it unassigned).
    yarn_application_id = ''
    while yarn_application_id == '' and time.time() - start_time \
            < max_wait_time_job_start_s:
        yarn_application_id = getYARNApplicationID(app_name)
    cmd_output.wait()
    if yarn_application_id == '':
        # Replace the raise by `return False` if a failed task should
        # not stop the entire process.
        # BUG FIX: restored the missing space before "application".
        raise RuntimeError("Couldn't get yarn application ID for"
                           " application %s" % app_name)
    final_status = getSparkJobFinalStatus(yarn_application_id)
    # BUG FIX: the opening quote of the format string was missing.
    log_path = "%s/%s_%s.log" % (error_log_dir, app_name,
                                 yarn_application_id)
    if final_status != "SUCCEEDED":
        # Save the driver/executor logs for post-mortem debugging.
        # BUG FIX: keyword was misspelled "universal_lines".
        cmd_output = subprocess.Popen(["yarn", "logs", "-applicationId",
                                       yarn_application_id],
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      bufsize=1,
                                      universal_newlines=True)
        with open(log_path, "w") as f:
            for line in cmd_output.stdout:
                f.write(line)
        print("Written log of failed task to %s" % log_path)
        cmd_output.communicate()
        # Replace the raise by `return False` if a failed task should
        # not stop the entire process.
        raise RuntimeError("Task %s has not succeeded" % app_name)
    return True
As you can see, the function uses the previously defined functions getYARNApplicationID and getSparkJobFinalStatus, and the application name, the spark-submit command line and the directory to store the error logs are passed as arguments to the function.
Note that the function raises an exception in case the yarn application ID cannot be found, or the status of the Spark job is not successful. But depending on the case, we may just want the function to return a False value, such that the controller task knows that this particular subordinate task has not been successful and needs to be executed again, without need to run again the tasks that have been already successful. In this case, we just need to replace line
raise RuntimeError("Couldn't get yarn application ID for application %s" % app_name)
and
raise RuntimeError("Task %s has not succeeded" % app_name)
with
return False
The next step is to create a generic code to start the threads and wait for their completion, as follows:
def executeAllThreads(dict_spark_submit_cmds, error_log_dir,
                      dict_success_app=None, max_parallel=4):
    """Run all subordinate tasks, at most `max_parallel` at a time.

    Parameters:
        dict_spark_submit_cmds: {app_name: spark-submit command line}.
        error_log_dir: directory where failed-task logs are written.
        dict_success_app: optional {app_name: bool} from a previous run;
            tasks already marked True are skipped.
        max_parallel: cap on simultaneously running subordinate tasks.
            BUG FIX: this was an undefined global in the original; it is
            now a keyword parameter with a conservative default.

    Returns {app_name: bool} with each task's success flag.
    """
    if dict_success_app is None:
        dict_success_app = {app_name: False for app_name
                            in dict_spark_submit_cmds.keys()}
    with ThreadPoolExecutorWithQueueSizeLimit(
            maxsize=max_parallel, max_workers=max_parallel) as executor:
        # Only (re-)submit the tasks that have not yet succeeded.
        future_to_app_name = {
            executor.submit(
                executeThread, app_name, spark_submit_cmd, error_log_dir,
            ): app_name
            for app_name, spark_submit_cmd
            in dict_spark_submit_cmds.items()
            if not dict_success_app[app_name]
        }
        for future in concurrent.futures.as_completed(future_to_app_name):
            app_name = future_to_app_name[future]
            try:
                dict_success_app[app_name] = future.result()
            except Exception as exc:
                print('Subordinate task %s generated exception %s'
                      % (app_name, exc))
                raise
    return dict_success_app
The mandatory arguments to the function are:
a dictionary with application names as keys and the corresponding job submission command lines as values;
the directory to store the error logs.
The output of the function is also a dictionary containing the return value (True or False) of each subordinate task, indexed by application name. The optional argument is dict_success_app, that can be the return value from a previous execution from the function, in case we only want to run the subordinate tasks that have not been already successful. I will show later how that can be accomplished.
For the readerβs convenience, I put together the complete code of the parallelisation framework below:
import subprocess
import time
import concurrent.futures
from queue import Queue


class ThreadPoolExecutorWithQueueSizeLimit(
        concurrent.futures.ThreadPoolExecutor):
    """Thread pool whose internal work queue is bounded, so submitting a
    new task blocks once `maxsize` tasks are pending (backpressure)."""

    def __init__(self, maxsize, *args, **kwargs):
        super(ThreadPoolExecutorWithQueueSizeLimit,
              self).__init__(*args, **kwargs)
        self._work_queue = Queue(maxsize=maxsize)


def getYARNApplicationID(app_name):
    """Return the YARN application ID for `app_name`, or '' if not found.

    Relies on the application name being unique among the listed states.
    """
    state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED'
    out = subprocess.check_output(["yarn", "application", "-list",
                                   "-appStates", state],
                                  stderr=subprocess.DEVNULL,
                                  universal_newlines=True)
    lines = [x for x in out.split("\n")]
    application_id = ''
    for line in lines:
        if app_name in line:
            # First tab-separated column of the matching row is the ID.
            application_id = line.split('\t')[0]
            break
    return application_id


def getSparkJobFinalStatus(application_id):
    """Return the final state (e.g. 'SUCCEEDED') reported by YARN."""
    out = subprocess.check_output(["yarn", "application",
                                   "-status", application_id],
                                  stderr=subprocess.DEVNULL,
                                  universal_newlines=True)
    status_lines = out.split("\n")
    state = ''
    for line in status_lines:
        if len(line) > 15 and line[1:15] == "Final-State : ":
            state = line[15:]
            break
    return state


def executeThread(app_name, spark_submit_cmd, error_log_dir,
                  max_wait_time_job_start_s=120):
    """Launch one subordinate Spark job and wait for its completion.

    Returns True on success; raises RuntimeError if the job cannot be
    found in YARN or finishes unsuccessfully (the YARN log of a failed
    job is saved under `error_log_dir` first).
    """
    cmd_output = subprocess.Popen(spark_submit_cmd, shell=True,
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
    # fix: yarn_application_id must be initialised before the polling
    # loop (it was referenced before assignment in the original).
    yarn_application_id = ''
    start_time = time.time()
    while yarn_application_id == '' and time.time() - start_time \
            < max_wait_time_job_start_s:
        yarn_application_id = getYARNApplicationID(app_name)
    cmd_output.wait()
    if yarn_application_id == '':
        raise RuntimeError("Couldn't get yarn application ID for"
                           " application %s" % (app_name))
        # Replace the raise above by `return False` if you do not
        # want a failed task to stop the entire process.
    final_status = getSparkJobFinalStatus(yarn_application_id)
    # fix: the format string was missing its opening quote.
    log_path = "%s/%s_%s.log" % (error_log_dir, app_name,
                                 yarn_application_id)
    if final_status != "SUCCEEDED":
        # Save the YARN log of the failed job for later inspection.
        cmd_output = subprocess.Popen(["yarn", "logs",
                                       "-applicationId",
                                       yarn_application_id],
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      bufsize=1,
                                      # fix: keyword was misspelled
                                      # `universal_lines`.
                                      universal_newlines=True)
        with open(log_path, "w") as f:
            for line in cmd_output.stdout:
                f.write(line)
        print("Written log of failed task to %s" % log_path)
        cmd_output.communicate()
        raise RuntimeError("Task %s has not succeeded" % app_name)
        # Replace the raise above by `return False` if you do not
        # want a failed task to stop the entire process.
    return True


def executeAllThreads(dict_spark_submit_cmds, error_log_dir,
                      dict_success_app=None):
    """Run all subordinate tasks in parallel; see executeThread.

    Returns a dict mapping application name to True/False. Pass a
    previous return value as `dict_success_app` to re-run only the
    tasks that have not yet succeeded. Requires `max_parallel` to be
    defined in the enclosing scope.
    """
    if dict_success_app is None:
        dict_success_app = {app_name: False
                            for app_name in dict_spark_submit_cmds}
    with ThreadPoolExecutorWithQueueSizeLimit(
            maxsize=max_parallel, max_workers=max_parallel) as executor:
        future_to_app_name = {
            executor.submit(executeThread, app_name, spark_submit_cmd,
                            error_log_dir): app_name
            for app_name, spark_submit_cmd
            in dict_spark_submit_cmds.items()
            if not dict_success_app[app_name]
        }
        for future in concurrent.futures.as_completed(future_to_app_name):
            app_name = future_to_app_name[future]
            try:
                dict_success_app[app_name] = future.result()
            except Exception as exc:
                print('Subordinate task %s generated exception %s'
                      % (app_name, exc))
                raise
    return dict_success_app
In this example, I will show how to use the framework above to parallelise training of a multi-label classifier with hundreds of labels. Basically, we will train multiple binary classifiers in parallel, where the training of each binary model is itself parallelised via Spark. The individual binary classifiers are Gradient Boosting models trained using the Spark version of the popular LightGBM package, contained in the Microsoft Machine Learning for Spark (MMLSpark) library.
By using the framework above, there are only two other things that the controller task needs to do:
Prior to calling the executeAllThreads function, set up the application name and spark-submit command for each subordinate task;After returning from the executeAllThreads function, check which subordinate tasks have been successful and handle their output appropriately.
Prior to calling the executeAllThreads function, set up the application name and spark-submit command for each subordinate task;
After returning from the executeAllThreads function, check which subordinate tasks have been successful and handle their output appropriately.
For the first part, we can start by looking at our previous example where we are submitting a standalone subordinate job:
# Directory holding the Spark configuration for cluster deploy mode.
spark_config_cluster_path = "/home/edsonaoki/spark_config_cluster"

# Application name: used later to find the job in YARN, so it should
# be unique within the cluster.
app_name = "some_model_training"

# Per-job Spark settings, passed as --conf key="value" pairs.
spark_config = {
    "spark.jars.packages":
        "com.microsoft.ml.spark:mmlspark_2.11:0.18.1",
    "spark.dynamicAllocation.enabled": "false",
    "spark.executor.instances": "10",
    "spark.yarn.dist.files": "/home/edsonaoki/custom_packages.tar"
}

# Script to run plus its command-line arguments. Note the trailing
# spaces: the pieces are concatenated into one string, and the original
# was missing the space after the dataset path, fusing the two
# arguments together.
command = "lightgbm_training.py "\
          "hdfs://user/edsonaoki/datasets/input_data.parquet "\
          "hdfs://user/edsonaoki/models"

# fix: spark-submit requires the long options --name/--conf; they were
# mangled to single dashes in the original text.
spark_submit_cmd = "SPARK_CONF_DIR=%s spark-submit --name %s %s %s" % (
    spark_config_cluster_path, app_name,
    " ".join(['--conf %s="%s"' % (key, value)
              for key, value in spark_config.items()]),
    command)
What do we need to change to adapt the code for multi-label classification? First, for the reasons already mentioned, the application name needs to be completely unique. Assuming that the label columns of the dataset input_data.parquet are contained in a variable lst_labels, one way to ensure likely unique application names for each subordinate task would be something like:
import time

# A millisecond-resolution timestamp appended to each name makes the
# application names (very likely) unique across controller runs.
curr_timestamp = int(time.time() * 1000)
app_names = [f"model_training_{label}_{curr_timestamp}"
             for label in lst_labels]
This ensures that application names will be unique as long as the controller task is not started more than once in the same millisecond (of course, if we have a shared YARN cluster, other adaptations may be needed to make the application names unique, such as adding the username to the application name).
We are yet to discuss what the subordinate task code contained in lightgbm_training.py looks like, but let's suppose it:
Performs some pre-processing on the training data, based on the label column (such as dataset balancing), using a function contained in the custom_packages.tar file submitted along with the Spark job
Trains the model based on the features column and the label column
Saves the trained model in the HDFS system
In this case, the controller task needs to pass the HDFS path of the training dataset, the HDFS path to store the trained models, and the label to be used for each subordinate task, via command-line arguments to lightgbm_training.py. This can be done as shown below:
# Build one spark-submit command line per label; each subordinate task
# trains a binary classifier on a different label column.
dict_spark_submit_cmds = dict()
for i in range(len(lst_labels)):
    command = "lightgbm_training.py "\
              "hdfs://user/edsonaoki/datasets/input_data.parquet "\
              "hdfs://user/edsonaoki/models "\
              + lst_labels[i]
    # fix: restored the opening quote (mangled to a stray character in
    # the original) and the long options --name/--conf required by
    # spark-submit.
    spark_submit_cmd = "SPARK_CONF_DIR=%s spark-submit --name %s "\
                       "%s %s" % (spark_config_cluster_path,
                                  app_names[i],
                                  " ".join(['--conf %s="%s"'
                                            % (key, value)
                                            for key, value
                                            in spark_config.items()]),
                                  command)
    dict_spark_submit_cmds[app_names[i]] = spark_submit_cmd
Of course, there are many other ways to customise the subordinate tasks. We might want to use different model training hyperparameters, different datasets, different Spark configurations, or even use different Python scripts for each subordinate task. The fact that we allow the spark-submit command line to be unique for each subtask allows complete customisation.
For the reader's convenience, I put together the controller task's code prior to and until calling executeAllThreads:
import time

# Controller task: build one unique spark-submit command per label and
# launch all subordinate training jobs in parallel.
# NOTE(review): assumes `lst_labels` (list of label column names) is
# defined earlier in the script.
spark_config_cluster_path = "/home/edsonaoki/spark_config_cluster"

# Millisecond timestamp keeps application names unique across runs.
curr_timestamp = int(time.time() * 1000)
app_names = ["model_training_%s_%d" % (label, curr_timestamp)
             for label in lst_labels]

spark_config = {
    "spark.jars.packages":
        "com.microsoft.ml.spark:mmlspark_2.11:0.18.1",
    "spark.dynamicAllocation.enabled": "false",
    "spark.executor.instances": "10",
    "spark.yarn.dist.files": "/home/edsonaoki/custom_packages.tar"
}

dict_spark_submit_cmds = dict()
for i in range(len(lst_labels)):
    command = "lightgbm_training.py "\
              "hdfs://user/edsonaoki/datasets/input_data.parquet "\
              "hdfs://user/edsonaoki/models "\
              + lst_labels[i]
    # fix: restored the opening quote (mangled to a stray character in
    # the original) and the long options --name/--conf required by
    # spark-submit.
    spark_submit_cmd = "SPARK_CONF_DIR=%s spark-submit --name %s "\
                       "%s %s" % (spark_config_cluster_path,
                                  app_names[i],
                                  " ".join(['--conf %s="%s"'
                                            % (key, value)
                                            for key, value
                                            in spark_config.items()]),
                                  command)
    dict_spark_submit_cmds[app_names[i]] = spark_submit_cmd

executeAllThreads(dict_spark_submit_cmds, "/home/edsonaoki/logs")
For the second part, i.e. what the controller task should do after returning from executeAllThreads, assuming that the successful tasks have saved the trained models in the HDFS system, we can just open these files and process them as appropriate, for instance applying the models to some appropriate validation dataset, generating plots and computing performance metrics.
If we use the parallelisation framework presented earlier as it is, there won't be "unsuccessful subordinate tasks", as any failure will result in an exception being raised. But if we modify executeThread to return False in case of task failure, we might store the returned dict_success_app dictionary in a JSON or Pickle file such that we can later investigate and fix the failed tasks. Finally, we can call executeAllThreads again with the optional argument dict_success_app set such that we re-run only the failed tasks.
Let us now write the code of the subordinate task in the lightgbm_training.py script. The first step is to read the input arguments of the script, i.e. the path of the training dataset in the HDFS filesystem, the path to store the models and the name of the label column:
import sys

# Positional arguments supplied by the controller task via spark-submit:
#   1: HDFS path of the training dataset
#   2: HDFS directory where trained models are stored
#   3: name of the label column this task trains on
train_data_path = sys.argv[1]
model_path = sys.argv[2]
label = sys.argv[3]
Since we are using the Spark version of LightGBM, we need to create a Spark session, which we do as follows:
from pyspark.sql import SparkSession

# No builder options are needed: deploy mode, executors and packages
# were already fixed by the spark-submit command line issued by the
# controller task.
spark = SparkSession.builder.getOrCreate()

# custom_packages.tar was shipped with the job (spark.yarn.dist.files);
# register it so its modules become importable inside this task.
spark.sparkContext.addPyFile("./custom_packages.tar")
Note that there is no need to set up any configuration for the Spark session, as it has been already done in the command line submitted by the controller task. Also, since we explicitly submitted a custom Python package custom_packages.tar to the Spark job, we need to use the addPyFile function to make the contents of the package usable inside our code, as the package is not included in the PYTHONPATH environment variable of the Hadoop cluster.
The code that does the actual processing in the subordinate task is pretty straightforward. The subordinate task will read the training data, call some pre-processing function inside custom_packages.tar (say custom_data_preprocessing.datasetBalancing), perform the model training, and save the trained model with a unique name back in the HDFS file system:
from custom_data_preprocessing import datasetBalancing
from mmlspark import LightGBMClassifier

# Load the training set and rebalance it for the current label.
raw_df = spark.read.parquet(train_data_path)
balanced_df = datasetBalancing(raw_df, label)

# Configure the classifier; features are read from the vector column
# named "features", the target from the task's label column.
classifier = LightGBMClassifier(learningRate=0.3,
                                numIterations=150,
                                numLeaves=45)
classifier = classifier.setFeaturesCol("features").setLabelCol(label)

# Train and persist the model under a label-specific file name.
fitted_model = classifier.fit(balanced_df)
fitted_model.write().overwrite()\
    .save(model_path + "/trained_model_%s.mdl" % label)

spark.stop()
The full code of lightgbm_training.py is put together below for the reader's convenience:
import sys

# --- Command-line arguments from the controller task -----------------
train_data_path = sys.argv[1]   # HDFS path of the training dataset
model_path = sys.argv[2]        # HDFS directory for trained models
label = sys.argv[3]             # label column for this binary model

# --- Spark session ----------------------------------------------------
# Configuration comes entirely from the spark-submit command line.
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark.sparkContext.addPyFile("./custom_packages.tar")

# NOTE: these imports must stay below addPyFile(): the custom package
# only becomes importable after it is registered with the session.
from custom_data_preprocessing import datasetBalancing
from mmlspark import LightGBMClassifier

# --- Training ---------------------------------------------------------
raw_df = spark.read.parquet(train_data_path)
balanced_df = datasetBalancing(raw_df, label)

classifier = LightGBMClassifier(learningRate=0.3,
                                numIterations=150,
                                numLeaves=45)\
    .setFeaturesCol("features")\
    .setLabelCol(label)

fitted_model = classifier.fit(balanced_df)
fitted_model.write().overwrite()\
    .save(model_path + "/trained_model_%s.mdl" % label)

spark.stop()
It is easy to see that the framework presented in this article can be re-used for various tasks other than multiple machine learning model training. A question that may arise is whether it can be used for different cluster environments, for instance with Spark on Mesos rather than Spark on YARN. I believe so, but some adaptations are needed as the presented code relies heavily on the yarn command to monitor the subordinate tasks.
By using this framework, data scientists can focus more of their time on designing the data tasks, not on manually executing them for dozens or hundreds of small variations. Another advantage is that by harnessing parallelisation, the tasks can be done in much less time, or from a different perspective, without requiring multiple data scientists to work simultaneously to complete the tasks in the same amount of time.
Naturally, this article presents only one of many ways to improve data science automation. Organisations that realise that the time of data scientists and other skilled tech professionals is highly valuable will certainly find increasingly more ways to help these professionals focus on higher-level problems.
|
[
{
"code": null,
"e": 191,
"s": 172,
"text": "image: Freepik.com"
},
{
"code": null,
"e": 766,
"s": 191,
"text": "The objective of this article is to show how a single data scientist can launch dozens or hundreds of data science-related tasks simultaneously (including machine learning model training) without using complex deployment frameworks. In fact, the tasks can be launched from a “data scientist”-friendly interface, namely, a single Python script which can be run from an interactive shell such as Jupyter, Spyder or Cloudera Workbench. The tasks can be themselves parallelised in order to handle large amounts of data, such that we effectively add a second layer of parallelism."
},
{
"code": null,
"e": 1130,
"s": 766,
"text": "Data scientists who wish to do more work with less time, by making use of large scale computational resources (e.g. clusters or public clouds), possibly shared with other users via YARN. To understand this article you need a good knowledge of Python, working knowledge of Spark, and at least basic understanding about Hadoop YARN architecture and shell scripting;"
},
{
"code": null,
"e": 1273,
"s": 1130,
"text": "Machine learning engineers who are supporting data scientists on making use of available computational capacity and operating large scale data"
},
{
"code": null,
"e": 1517,
"s": 1273,
"text": "“Data science” and “automation” are two words that invariably go hand-in-hand with each other, as one of the key goals of machine learning is to allow machines to perform tasks more quickly, with lower cost, and/or better quality than humans."
},
{
"code": null,
"e": 1965,
"s": 1517,
"text": "Naturally, it wouldnβt make sense for an organization to spend more on tech staff that are supposed to develop and maintain systems that automate work (data scientists, data engineers, DevOps engineers, software engineers and others) than on the staff that do the work manually. Itβs not thus surprising that a recurrent discussion is how much we can automate the work of data science teams themselves, for instance via automated machine learning."
},
{
"code": null,
"e": 2407,
"s": 1965,
"text": "To achieve cost-effective data science automation, it is imperative to able to harness computational power from public or private clouds; after all, the cost of hardware is quite low compared to the cost of highly skilled technical staff. While technology to achieve so is certainly available, many organisations ended up facing the βbig data software engineer vs data scientist conundrumβ, or more precisely, the drastic discrepancy between"
},
{
"code": null,
"e": 2626,
"s": 2407,
"text": "βBig data software engineer skillsβ, i.e. skills necessary to manipulate massive amounts of data in complex computational environments, and run these processes in a reliable manner along with other concurrent processes"
},
{
"code": null,
"e": 2777,
"s": 2626,
"text": "βData scientist skillsβ, i.e. skills necessary to apply algorithms and mathematics to the data to extract insights valuable from a business standpoint"
},
{
"code": null,
"e": 2796,
"s": 2777,
"text": "image: Freepik.com"
},
{
"code": null,
"e": 3294,
"s": 2796,
"text": "Some organisations would make βdata scientistsβ responsible for developing the analytics models in some sort of βcontrolled analytics environmentβ where one does not need to think too much about the underlying computational resources or sharing the resources with other processes, and βbig data software engineersβ responsible for coding βproduction-readyβ versions of the models developed by data scientists and deploy them into production. This setup resulted in obvious inefficiencies, such as:"
},
{
"code": null,
"e": 4018,
"s": 3294,
"text": "Data scientists developing sub-optimal models due to not making use of large scale data and computational resources. In some organisations, data scientists even ended up working with single-node frameworks such as Pandas/Scikit-Learn and basing their models entirely on small datasets obtained via sampling or over-engineered features;Developed models performing well on analytics environment but not performing well, or being completely unable to run, in production environment;The difficulty to evaluate generation of business value, identify and fix problems, as well as making iterative improvements, as data scientists end up dramatically losing oversight of the analytics process once models are sent into production."
},
{
"code": null,
"e": 4354,
"s": 4018,
"text": "Data scientists developing sub-optimal models due to not making use of large scale data and computational resources. In some organisations, data scientists even ended up working with single-node frameworks such as Pandas/Scikit-Learn and basing their models entirely on small datasets obtained via sampling or over-engineered features;"
},
{
"code": null,
"e": 4499,
"s": 4354,
"text": "Developed models performing well on analytics environment but not performing well, or being completely unable to run, in production environment;"
},
{
"code": null,
"e": 4744,
"s": 4499,
"text": "The difficulty to evaluate generation of business value, identify and fix problems, as well as making iterative improvements, as data scientists end up dramatically losing oversight of the analytics process once models are sent into production."
},
{
"code": null,
"e": 5025,
"s": 4744,
"text": "Different organisations dealt with this situation with different ways, either by forcing big data software engineers and data scientists learn the skills of the βother roleβ, or by creating a βthird roleβ, named βMachine Learning Engineerβ to bridge the gap between the two roles."
},
{
"code": null,
"e": 5771,
"s": 5025,
"text": "But the fact is that nowadays, there are far more resources in terms of allowing data scientists without exceptional software engineering skills to work in βrealisticβ environments, i.e. similar to production, in terms of computational complexity. Machine learning libraries such as Spark MLLib, Kubeflow, Tensorflow-GPU, and MMLSpark allow data preparation and model training to be distributed across multiple CPUs, GPUs, or a combination of both; at the same time, frameworks such as Apache Hadoop YARN and Kubernetes allow data scientists to work simultaneously using the same computational resources, by understanding only basic concepts about the underlying server infrastructure, such as number of available CPUs/GPUs and available memory."
},
{
"code": null,
"e": 6015,
"s": 5771,
"text": "The intent of this article is to provide an example of how these libraries and frameworks, as well as massive (but shared) computational resources, can be leveraged together in order to automate the creation and testing of data science models."
},
{
"code": null,
"e": 6667,
"s": 6015,
"text": "Frameworks like Spark and Kubeflow make easy to distribute a Big Data task, such as feature processing or machine learning model training, across GPUs and/or hundreds of CPUs without a detailed understanding of the server architecture. On the other hand, executing tasks in parallel, rather than individual parallelised tasks, is not as seamless. Of course, itβs not hard for a data scientist to work with two or three PySpark sessions in Jupyter at the same time, but for the sake of automation, we might be rather interested in running dozens and hundreds of tasks simultaneously, all specified in a programmatic way with minimal human interference."
},
{
"code": null,
"e": 6856,
"s": 6667,
"text": "Naturally, one may ask why bother with running tasks in parallel, instead of simply increasing the number of cores per task and make each task run in a shorter time. There are two reasons:"
},
{
"code": null,
"e": 7575,
"s": 6856,
"text": "The processing speed often does not scale with the number of cores. For example, in the case of training machine learning models, if the data is not large enough, there might be zero improvement on computation time by increasing the number of cores from say, 10 to 100, and sometimes the computational time might even increase due to process and communication overhead, as well as the inability to leverage highly efficient single-processor implementations available in some machine learning librariesThe accuracy of machine learning algorithms models may also decrease due to parallelisation, as those algorithms often rely on suboptimal heuristics to able to run in distributed fashion, such as data split and voting"
},
{
"code": null,
"e": 8077,
"s": 7575,
"text": "The processing speed often does not scale with the number of cores. For example, in the case of training machine learning models, if the data is not large enough, there might be zero improvement on computation time by increasing the number of cores from say, 10 to 100, and sometimes the computational time might even increase due to process and communication overhead, as well as the inability to leverage highly efficient single-processor implementations available in some machine learning libraries"
},
{
"code": null,
"e": 8295,
"s": 8077,
"text": "The accuracy of machine learning algorithms models may also decrease due to parallelisation, as those algorithms often rely on suboptimal heuristics to able to run in distributed fashion, such as data split and voting"
},
{
"code": null,
"e": 9064,
"s": 8295,
"text": "It is certainly possible, using deployment tools such as Airflow, to run arbitrarily complex, dynamically defined and highly automated data analytics pipelines involving parallelised tasks. However, these tools require low-level scripting and configuration and arenβt suited for quick βtrial and errorβ experiments carried on by data scientists on a daily basis, often accustomed to try and re-try ideas quickly in interactive shells such as Jupyter or Spyder. Also, taking us back to the previously mentioned βbig data software engineer vs data scientistβ conundrum, organisations might prefer data scientists to spend their time focusing on experimenting with the data and generating business value, not on getting immersed in low-level implementation or deployment."
},
{
"code": null,
"e": 9609,
"s": 9064,
"text": "In this article, I will show how we can make use of Apache Hadoop YARN to launch and monitor multiple jobs in a Hadoop cluster simultaneously, (including individually parallelised Spark jobs), directly from any Python code (including code from interactive Python shells such as Jupyter), via Python multithreading. While the example will consist of training multiple machine learning models in parallel, I will provide a generic framework that can be used to launch arbitrary data tasks such as feature engineering and model metric computation."
},
{
"code": null,
"e": 9669,
"s": 9609,
"text": "Some applications for multiple model parallel training are:"
},
{
"code": null,
"e": 9979,
"s": 9669,
"text": "Hyper-parameter tuning: For the same training data set, simultaneously train using different model types (say Logistic Regression, Gradient Boosting and Multi-layer Perceptron) and also different hyperparameter configurations, in order to find the optimal model type/hyperparameter set as quickly as possible;"
},
{
"code": null,
"e": 10248,
"s": 9979,
"text": "Multi-label classification: Train multiple binary/multi-class classification models in parallel, where each model training task will use a different column as the label column, such that the resulting combination of models will effectively be a multi-label classifier;"
},
{
"code": null,
"e": 10441,
"s": 10248,
"text": "Feature reduction: For a poll of previously ranked features, train multiple models, each using only the top N-ranked features as feature columns, with N being varied across the training tasks."
},
{
"code": null,
"e": 10790,
"s": 10441,
"text": "In our framework, I will call the main task, i.e. the Python code that creates the additional tasks to run in parallel, as the controller task, and the tasks being started by the controller task as the subordinate tasks. (I intentionally avoid using the expression “worker” to avoid confusion, as in Spark, “worker” is a synonym for Spark executor)"
},
{
"code": null,
"e": 10830,
"s": 10790,
"text": "The controller task is responsible for:"
},
{
"code": null,
"e": 10943,
"s": 10830,
"text": "Defining how many subordinate tasks should be run at the same time and what to do in case one of the tasks fail;"
},
{
"code": null,
"e": 11042,
"s": 10943,
"text": "Creating the subordinate tasks, passing the inputs to each task and getting their outputs, if any;"
},
{
"code": null,
"e": 11117,
"s": 11042,
"text": "Generating the inputs and processing the outputs of the subordinate tasks."
},
{
"code": null,
"e": 11535,
"s": 11117,
"text": "An interesting aspect of YARN is that it allows Spark to be used both in the controller and subordinate tasks. Although neither is necessary, this allows us to handle arbitrarily large datasets without needing to worry ourselves with data engineering, as long as we have enough computational resources. Namely, the controller task can run Spark in client mode, and the subordinate tasks can run Spark in cluster mode:"
},
{
"code": null,
"e": 11973,
"s": 11535,
"text": "In client mode, the Spark driver runs in the environment where the controllerβs Python code is being run (that we refer to as client environment), allowing the use of locally installed interactive shells such as Jupyter, whereas the Spark executors run in the YARN-managed Hadoop cluster, with the interactions between the driver and executors made via a third type of process named Application Master also running in the Hadoop cluster;"
},
{
"code": null,
"e": 12202,
"s": 11973,
"text": "In cluster mode, both the driver and the executors run in the YARN-managed Hadoop cluster. Note that nothing prevent us to have the controller task also running in cluster mode, but interactive shells cannot be used in this way."
},
{
"code": null,
"e": 12252,
"s": 12202,
"text": "The framework is illustrated in the figure below:"
},
{
"code": null,
"e": 12306,
"s": 12252,
"text": "There are two things to note about the example above:"
},
{
"code": null,
"e": 12624,
"s": 12306,
"text": "Although in the example the controller task is also the driver of the Spark process (and thus associated with executors in the Hadoop cluster via the YARN Application Master), this is not necessary, although useful for example if we want to do some preprocessing on the data before deploying to the subordinate tasks;"
},
{
"code": null,
"e": 12877,
"s": 12624,
"text": "Although the subordinate tasks do not need to use Spark parallelisation, we will use the spark-submit command to launch them, such that they will always have a Spark driver, although not necessarily Spark executors. This is the case of process 3 above."
},
{
"code": null,
"e": 13274,
"s": 12877,
"text": "Before I delve into parallelisation, I will first explain how to execute a subordinate task from a controller task written in Python. As mentioned before, we will do so using the spark-submit shell script contained in the Apache Spark installation, such that the subordinate task will be technically a Spark job, although it does not necessarily has executors or Spark code as I mentioned before."
},
{
"code": null,
"e": 13569,
"s": 13274,
"text": "In principle, we can use spark-submit from Python by simply calling the os.system function, which allows us to execute a shell command from Python. In practice, we need to be able to debug and monitor the task; for that purpose, it is better to use the excellent subprocess library. An example:"
},
{
"code": null,
"e": 14505,
"s": 13569,
"text": "import jsonimport subprocessspark_config_cluster_path = \"/home/edsonaoki/spark_config_cluster\"app_name = \"some_model_training\"spark_config = { \"spark.jars.packages\" : \"com.microsoft.ml.spark:mmlspark_2.11:0.18.1\", \"spark.dynamicAllocation.enabled\": \"false\", \"spark.executor.instances\": \"10\", \"spark.yarn.dist.files\": \"/home/edsonaoki/custom_packages.tar\"}command = \"lightgbm_training.py \"\\ \"hdfs://user/edsonaoki/datasets/input_data.parquet \"\\ \"hdfs://user/edsonaoki/models\"spark_submit_cmd = βSPARK_CONF_DIR=%s spark-submit -name %s %s %s\" % (spark_config_cluster_path, app_name, \" \".join(['-conf %s=\"%s\"' % (key, value) for key, value in spark_config.items()]), command)cmd_output = subprocess.Popen(spark_submit_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)for line in cmd_output.stdout: print(line)cmd_output.communicate()"
},
{
"code": null,
"e": 14869,
"s": 14505,
"text": "At the beginning of the code I set the path containing the cluster mode base Spark configuration, which is later used to change the SPARK_CONF_DIR environmental variable. This is an actually crucial step if the controller task is configured to run in Spark in client mode since the Spark configuration for cluster mode is typically different than for client mode."
},
{
"code": null,
"e": 15053,
"s": 14869,
"text": "If you donβt know much about how to configure Spark in cluster mode, you can start by making a copy of the existing SPARK_CONF_DIR. Inside the spark-defaults.conf file we need to have"
},
{
"code": null,
"e": 15085,
"s": 15053,
"text": "spark.submit.deployMode=cluster"
},
{
"code": null,
"e": 15096,
"s": 15085,
"text": "instead of"
},
{
"code": null,
"e": 15127,
"s": 15096,
"text": "spark.submit.deployMode=client"
},
{
"code": null,
"e": 15536,
"s": 15127,
"text": "and certain configuration options, such as spark.yarn.rmProxy.enabled and the spark.driver.options.* options need to be disabled as there is no network-specific configuration for the driver when running Spark in cluster mode. Check the Spark on YARN documentation if you are in doubt. Of course, if the controller task is also running Spark in cluster mode, there is no need to have a separate configuration."
},
{
"code": null,
"e": 15574,
"s": 15536,
"text": "Now, looking at the subsequent steps:"
},
{
"code": null,
"e": 16194,
"s": 15574,
"text": "app_name = \"some_model_training\"spark_config = { \"spark.jars.packages\" : \"com.microsoft.ml.spark:mmlspark_2.11:0.18.1\", \"spark.dynamicAllocation.enabled\": \"false\", \"spark.executor.instances\": \"10\", \"spark.yarn.dist.files\": \"/home/edsonaoki/custom_packages.tar\"}command = \"lightgbm_training.py \"\\ \"hdfs://user/edsonaoki/datasets/input_data.parquet\"\\ \"hdfs://user/edsonaoki/models\"spark_submit_cmd = βSPARK_CONF_DIR=%s spark-submit -name %s %s %s\" % (spark_config_cluster_path, app_name, \" \".join(['-conf %s=\"%s\"' % (key, value) for key, value in spark_config.items()]), command)"
},
{
"code": null,
"e": 16753,
"s": 16194,
"text": "Here I set up the application name, additional Spark configuration options and the command to be executed by the spark-submit script. These are straightforward to understand, but the application name is particularly important in our case β we will later understand why. We also submit a custom Python package via the spark.yarn.dist.files configuration parameter, which as I will show later, is especially handy since the subordinate task runs in the Hadoop cluster and hence has no access to the Python functions available in the local (client) environment."
},
{
"code": null,
"e": 17238,
"s": 16753,
"text": "Note also that I specify two HDFS paths as arguments to the lightgbm_training.py Python script (the subordinate taskβs code), for a similar reason to above: since the Python script will run in the Hadoop cluster, it will not have access to any files in the client environmentβs file system, and hence any files to be exchanged between controller or subordinate task must be either explicitly submitted via spark.yarn.dist.files or put into a shared file system such as HDFS or AWS S3."
},
{
"code": null,
"e": 17354,
"s": 17238,
"text": "After preparing the spark-submit shell command line, we are ready to execute it using the subprocess.Popen command:"
},
{
"code": null,
"e": 17507,
"s": 17354,
"text": "cmd_output = subprocess.Popen(spark_submit_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)"
},
{
"code": null,
"e": 17846,
"s": 17507,
"text": "We set shell=True to make Python initiate a separate shell process to execute the command, rather than attempting to initiate spark-submit directly from the Python process. Although setting shell=False is generally preferable when using the subprocess library, doing so restricts the command line format and itβs not feasible in our case."
},
{
"code": null,
"e": 18508,
"s": 17846,
"text": "The stdout, stderr, bufsize and universal_newlines arguments are used to handle the output (STDOUT) and error messages (STDERR) issued by the shell command during execution time. When we are executing multiple subordinate tasks in parallel, we will probably want to ignore all execution time messages as they will be highly cluttered and impossible to interpret anyways. This is also useful to save memory for reasons we will explain later. However, before attempting to run multiple tasks in parallel, it is certainly best to first make sure that each individual task will work properly, by running a single subordinate task with output/error messages enabled."
},
{
"code": null,
"e": 18732,
"s": 18508,
"text": "In the example I set stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1 and universal_newlines=True, which basically, will direct all shell command output to a First In First Out (FIFO) queue named subprocess.PIPE."
},
{
"code": null,
"e": 19328,
"s": 18732,
"text": "Note that when running a Spark job in cluster mode, subprocess.PIPE will only have access to messages from the YARN Application Master, not the driver or executors. To check the driver and executor messages, you might look at the Hadoop cluster UI via your browser, or retrieve the driver and executor logs post-execution as I will show later. Additionally, if file logging is enabled in the log4j.properties file (located in the Spark configuration), the messages from the Application Master will be logged into a file rather than directed to subprocess.PIPE, so disable file logging if needed."
},
{
"code": null,
"e": 19443,
"s": 19328,
"text": "Finally, to display the output/error messages in the Python scriptβs output, I continue the code above as follows:"
},
{
"code": null,
"e": 19513,
"s": 19443,
"text": "for line in cmd_output.stdout: print(line)cmd_output.communicate()"
},
{
"code": null,
"e": 19860,
"s": 19513,
"text": "The purpose of cmd_output.communicate() is to wait for the process to finish after subprocess.PIPE is empty, i.e. no more outputs from the subordinate task are written to it. It highly advisable to read the entire queue before calling cmd_output.communicate() method as done above, to prevent the queue from increasing in size and wasting memory."
},
{
"code": null,
"e": 20286,
"s": 19860,
"text": "As I mentioned earlier, when we run tasks in parallel we do not want debug messages to be displayed; moreover, if a large number of tasks are sending messages to an in-memory FIFO queue at the same time, memory usage will increase messages arenβt being read from the queue as fast as they are generated. A version of the code from the previous section without debugging, starting with the call to spark-submit, is as follows:"
},
{
"code": null,
"e": 21186,
"s": 20286,
"text": "cmd_output = subprocess.Popen(spark_submit_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)def getYARNApplicationID(app_name): state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED' out = subprocess.check_output([\"yarn\",\"application\",\"-list\", \"-appStates\",state], stderr=subprocess.DEVNULL, universal_newlines=True) lines = [x for x in out.split(\"\\n\")] application_id = '' for line in lines: if app_name in line: application_id = line.split('\\t')[0] break return application_idmax_wait_time_job_start_s = 120start_time = time.time()while yarn_application_id == '' and time.time()-start_time\\ < max_wait_time_job_start_s: yarn_application_id = getYARNApplicationID(app_name)cmd_output.wait()if yarn_application_id == '': raise RuntimeError(\"Couldn't get yarn application ID for application %s\" % app_name)"
},
{
"code": null,
"e": 21276,
"s": 21186,
"text": "The code starts by launching the subordinate task as before, but with debugging disabled:"
},
{
"code": null,
"e": 21393,
"s": 21276,
"text": "cmd_output = subprocess.Popen(spark_submit_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)"
},
{
"code": null,
"e": 21723,
"s": 21393,
"text": "Since there are no debug messages to be displayed when the process is running, we use cmd_output.wait instead of cmd_output.communicate() to wait for the task to finish. Note that although we wonβt see the Application Masterβs messages, we can still debug the Spark jobβs driver and executor in runtime via the Hadoop cluster UI."
},
{
"code": null,
"e": 22060,
"s": 21723,
"text": "However, we still need to be able to monitor the task from a programmatic point of view; more specifically, the controller task needs to know when the subordinate task has finished, whether it was successful, and take appropriate action in case of failure. For that purpose, we can use the application name that we set in the beginning:"
},
{
"code": null,
"e": 22093,
"s": 22060,
"text": "app_name = \"some_model_training\""
},
{
"code": null,
"e": 22392,
"s": 22093,
"text": "The application name can be used by YARN to retrieve the YARN application ID, which allows us to retrieve the status and other information about the subordinate task. Again, we can resort to the subprocess library to define a function that can retrieve the application ID from the application name:"
},
{
"code": null,
"e": 22836,
"s": 22392,
"text": "def getYARNApplicationID(app_name): state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED' out = subprocess.check_output([\"yarn\",\"application\",\"-list\", \"-appStates\",state], stderr=subprocess.DEVNULL, universal_newlines=True) lines = [x for x in out.split(\"\\n\")] application_id = '' for line in lines: if app_name in line: application_id = line.split('\\t')[0] break return application_id"
},
{
"code": null,
"e": 23147,
"s": 22836,
"text": "Observe that getYARNApplicationID parses the output of the yarn application -list shell command. Depending on your Hadoop version the output format may be slightly different and the parsing needs to be adjusted accordingly. If in doubt, you can test the format by running the following command in the terminal:"
},
{
"code": null,
"e": 23223,
"s": 23147,
"text": "$ yarn application -list -appStates RUNNING,ACCEPTED,FINISHED,KILLED,FAILED"
},
{
"code": null,
"e": 23658,
"s": 23223,
"text": "The tricky aspect is that this method can only work if the application name is unique in the Hadoop cluster. Therefore, you need to make sure you are creating a unique application name, for instance by including timestamps, random strings, your user ID, etc. Optionally, you can also add other filters when attempting to parse the output of yarn application -list, for example, the user ID, the YARN queue name or the time of the day."
},
{
"code": null,
"e": 23790,
"s": 23658,
"text": "Since the Spark job takes some time to be registered in YARN after it has been launched using spark-submit, I implemented the loop:"
},
{
"code": null,
"e": 23997,
"s": 23790,
"text": "max_wait_time_job_start_s = 120start_time = time.time()while yarn_application_id == '' and time.time()-start_time\\ < max_wait_time_job_start_s: yarn_application_id = getYARNApplicationID(app_name)"
},
{
"code": null,
"e": 24143,
"s": 23997,
"text": "where max_wait_time_job_start_s is the time to wait for the registration in seconds, which may need to be adjusted according to your environment."
},
{
"code": null,
"e": 24158,
"s": 24143,
"text": "The meaning of"
},
{
"code": null,
"e": 24287,
"s": 24158,
"text": "if yarn_application_id == '': raise RuntimeError(\"Couldn't get yarn application ID for\"\\ \" application %s\" % app_name)"
},
{
"code": null,
"e": 24596,
"s": 24287,
"text": "is straightforward; if there is no application ID, it means the Spark job has not been successfully launched and we need to throw an exception. This may also indicate that we need to increase max_wait_time_job_start_s, or change how the output of yarn application -list is parsed inside getYARNApplicationID."
},
{
"code": null,
"e": 24687,
"s": 24596,
"text": "After the subordinate task has finished, checking its final status can be done as follows:"
},
{
"code": null,
"e": 25139,
"s": 24687,
"text": "def getSparkJobFinalStatus(application_id): out = subprocess.check_output([\"yarn\",\"application\", \"-status\",application_id], stderr=subprocess.DEVNULL, universal_newlines=True) status_lines = out.split(\"\\n\") state = '' for line in status_lines: if len(line) > 15 and line[1:15] == \"Final-State : \": state = line[15:] break return statefinal_status = getSparkJobFinalStatus(yarn_application_id)"
},
{
"code": null,
"e": 25420,
"s": 25139,
"text": "where again, you may need to tune the parsing of yarn application -status depending on your Hadoop version. How to handle the final status is entirely up to you, but one possibility is to store the Spark jobβs driver and executor log in a file and raise an exception. For example:"
},
{
"code": null,
"e": 25971,
"s": 25420,
"text": "log_path = \"/home/edsonaoki/logs/%s_%s.log\" % (app_name, yarn_application_id)if final_status != \"SUCCEEDED\": cmd_output = subprocess.Popen([\"yarn\",\"logs\", \"-applicationId\",yarn_application_id], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_lines=True) with open(log_path, \"w\") as f: for line in cmd_output.stdout: f.write(line) print(\"Written log of failed task to %s\" % log_path) cmd_output.communicate() raise RuntimeError(\"Task %s has not succeeded\" % app_name)"
},
{
"code": null,
"e": 26176,
"s": 25971,
"text": "If not obvious, before attempting to execute subordinate tasks in parallel, make sure to test as many as tasks as possible without parallelisation, as debugging parallel tasks can be incredibly difficult."
},
{
"code": null,
"e": 27073,
"s": 26176,
"text": "To perform parallelisation we will use Pythonβs concurrent library. The concurrent library uses multithreading and not multiprocessing; i.e. the threads do run in the same processor, such that from the side of the controller task, there is no real parallel processing. However, since the threads started in the controller task are in I/O mode (unblocked) when waiting for the subordinate tasks to finish, multiple subordinate tasks can be launched asynchronously, such that they will actually run in parallel in the side of the Hadoop cluster. While we can technically use the multiprocessing library instead of the concurrent library to achieve parallelism also from the controller taskβs side, I would advise against it as it will substantially increase the memory consumption in the client environment for little benefit β the idea is that the βtough processingβ is done in the Hadoop cluster."
},
{
"code": null,
"e": 27694,
"s": 27073,
"text": "When we launch a Spark job, we are typically aware of the constraints of processing and memory in the cluster environment, especially in the case of a shared environment, and use configuration parameters such as spark.executor.memory and spark.executor.instances in order to control the taskβs processing and memory consumption. The same needs to be done in our case; we need to limit the number of subordinate tasks that execute simultaneously according to the availability of computational resources in the cluster, such that when we reach this limit, a subordinate task can only be started after another has finished."
},
{
"code": null,
"e": 28299,
"s": 27694,
"text": "The concurrent package offers the futures.ThreadPoolExecutor class which allows us to start multiple threads and wait for them to finish. The class also allows us to limit the number of threads doing active processing(i.e. not blocked by I/O) via the max_workers argument. However, as I mentioned before, a thread in the controller task is treated as being blocked by I/O when the subordinate task is running, which means that max_workers wonβt effectively limit the number of threads. As result, all subordinate tasks will be submitted nearly simultaneously and the Hadoop cluster can become overloaded."
},
{
"code": null,
"e": 28394,
"s": 28299,
"text": "This can be solved rather easily by modifying the futures.ThreadPoolExecutor class as follows:"
},
{
"code": null,
"e": 28722,
"s": 28394,
"text": "import concurrent.futuresfrom queue import Queueclass ThreadPoolExecutorWithQueueSizeLimit( concurrent.futures.ThreadPoolExecutor): def __init__(self, maxsize, *args, **kwargs): super(ThreadPoolExecutorWithQueueSizeLimit, self).__init__(*args, **kwargs) self._work_queue = Queue(maxsize=maxsize)"
},
{
"code": null,
"e": 28996,
"s": 28722,
"text": "This new class ThreadPoolExecutorWithQueueSizeLimit works exactly like futures.ThreadPoolExecutor, but it wonβt allow more than maxsize threads to exist at any point of time, effectively limiting the number of subordinate tasks running simultaneously in the Hadoop cluster."
},
{
"code": null,
"e": 29320,
"s": 28996,
"text": "We now need to define a function, containing the execution code of the thread, which can be passed as an argument to the class ThreadPoolExecutorWithQueueSizeLimit. Based on the previous code for executing a subordinate task from Python without debugging messages, I present the following generic thread execution function:"
},
{
"code": null,
"e": 30568,
"s": 29320,
"text": "def executeThread(app_name, spark_submit_cmd, error_log_dir, max_wait_time_job_start_s=120): cmd_output = subprocess.Popen(spark_submit_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) start_time = time.time() while yarn_application_id == '' and time.time()-start_time\\ < max_wait_time_job_start_s: yarn_application_id = getYARNApplicationID(app_name) cmd_output.wait() if yarn_application_id == '': raise RuntimeError(\"Couldn't get yarn application ID for\"\\ \"application %s\" % app_name) final_status = getSparkJobFinalStatus(yarn_application_id) log_path = %s/%s_%s.log\" % (error_log_dir, app_name, yarn_application_id) if final_status != \"SUCCEEDED\": cmd_output = subprocess.Popen([\"yarn\",\"logs\", \"-applicationId\",yarn_application_id], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_lines=True) with open(log_path, \"w\") as f: for line in cmd_output.stdout: f.write(line) print(\"Written log of failed task to %s\" % log_path) cmd_output.communicate() raise RuntimeError(\"Task %s has not succeeded\" % app_name) return True"
},
{
"code": null,
"e": 30823,
"s": 30568,
"text": "As you can see, the function uses the previously defined functions getYARNApplicationID and getSparkJobFinalStatus, and the application name, the spark-submit command line and the directory to store the error logs are passed as arguments to the function."
},
{
"code": null,
"e": 31291,
"s": 30823,
"text": "Note that the function raises an exception in case the yarn application ID cannot be found, or the status of the Spark job is not successful. But depending on the case, we may just want the function to return a False value, such that the controller task knows that this particular subordinate task has not been successful and needs to be executed again, without need to run again the tasks that have been already successful. In this case, we just need to replace line"
},
{
"code": null,
"e": 31376,
"s": 31291,
"text": "raise RuntimeError(\"Couldn't get yarn application ID for application %s\" % app_name)"
},
{
"code": null,
"e": 31380,
"s": 31376,
"text": "and"
},
{
"code": null,
"e": 31439,
"s": 31380,
"text": "raise RuntimeError(\"Task %s has not succeeded\" % app_name)"
},
{
"code": null,
"e": 31444,
"s": 31439,
"text": "with"
},
{
"code": null,
"e": 31457,
"s": 31444,
"text": "return False"
},
{
"code": null,
"e": 31563,
"s": 31457,
"text": "The next step is to create a generic code to start the threads and wait for their completion, as follows:"
},
{
"code": null,
"e": 32635,
"s": 31563,
"text": "def executeAllThreads(dict_spark_submit_cmds, error_log_dir, dict_success_app=None): if dict_success_app is None: dict_success_app = {app_name: False for app_name in dict_spark_submit_cmds.keys()} with ThreadPoolExecutorWithQueueSizeLimit(maxsize=max_parallel, max_workers=max_parallel) as executor: future_to_app_name = { executor.submit( executeThread, app_name, spark_submit_cmd, error_log_dir, ): app_name for app_name, spark_submit_cmd in dict_spark_submit_cmds.items() if dict_success_app[app_name] == False } for future in concurrent.futures\\ .as_completed(future_to_app_name): app_name = future_to_app_name[future] try: dict_success_app[app_name] = future.result() except Exception as exc: print('Subordinate task %s generated exception %s' % (app_name, exc)) raise return dict_success_app"
},
{
"code": null,
"e": 32680,
"s": 32635,
"text": "The mandatory arguments to the function are:"
},
{
"code": null,
"e": 32786,
"s": 32680,
"text": "a dictionary with application names as keys and the corresponding job submission command lines as values;"
},
{
"code": null,
"e": 32825,
"s": 32786,
"text": "the directory to store the error logs."
},
{
"code": null,
"e": 33226,
"s": 32825,
"text": "The output of the function is also a dictionary containing the return value (True or False) of each subordinate task, indexed by application name. The optional argument is dict_success_app, that can be the return value from a previous execution from the function, in case we only want to run the subordinate tasks that have not been already successful. I will show later how that can be accomplished."
},
{
"code": null,
"e": 33329,
"s": 33226,
"text": "For the readerβs convenience, I put together the complete code of the parallelisation framework below:"
},
{
"code": null,
"e": 37109,
"s": 33329,
"text": "import subprocessimport concurrent.futuresfrom queue import Queueclass ThreadPoolExecutorWithQueueSizeLimit( concurrent.futures.ThreadPoolExecutor): def __init__(self, maxsize, *args, **kwargs): super(ThreadPoolExecutorWithQueueSizeLimit, self).__init__(*args, **kwargs) self._work_queue = Queue(maxsize=maxsize)def getYARNApplicationID(app_name): state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED' out = subprocess.check_output([\"yarn\",\"application\",\"-list\", \"-appStates\",state], stderr=subprocess.DEVNULL, universal_newlines=True) lines = [x for x in out.split(\"\\n\")] application_id = '' for line in lines: if app_name in line: application_id = line.split('\\t')[0] break return application_iddef getSparkJobFinalStatus(application_id): out = subprocess.check_output([\"yarn\",\"application\", \"-status\",application_id], stderr=subprocess.DEVNULL, universal_newlines=True) status_lines = out.split(\"\\n\") state = '' for line in status_lines: if len(line) > 15 and line[1:15] == \"Final-State : \": state = line[15:] break return statedef executeThread(app_name, spark_submit_cmd, error_log_dir, max_wait_time_job_start_s = 120): cmd_output = subprocess.Popen(spark_submit_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) start_time = time.time() while yarn_application_id == '' and time.time()-start_time\\ < max_wait_time_job_start_s: yarn_application_id = getYARNApplicationID(app_name) cmd_output.wait() if yarn_application_id == '': raise RuntimeError(\"Couldn't get yarn application ID for\"\\ \" application %s\" % (app_name)) # Replace line above by the following if you do not # want a failed task to stop the entire process: # return False final_status = getSparkJobFinalStatus(yarn_application_id) log_path = %s/%s_%s.log\" % (error_log_dir, app_name, yarn_application_id) if final_status != \"SUCCEEDED\": cmd_output = subprocess.Popen([\"yarn\",\"logs\", \"-applicationId\",yarn_application_id], stdout=subprocess.PIPE, 
stderr=subprocess.STDOUT, bufsize=1, universal_lines=True) with open(log_path, \"w\") as f: for line in cmd_output.stdout: f.write(line) print(\"Written log of failed task to %s\" % log_path) cmd_output.communicate() raise RuntimeError(\"Task %s has not succeeded\" % app_name) # Replace line above by the following if you do not # want a failed task to stop the entire process: # return False return Truedef executeAllThreads(dict_spark_submit_cmds, error_log_dir, dict_success_app=None): if dict_success_app is None: dict_success_app = {app_name: False for app_name in dict_spark_submit_cmds.keys()} with ThreadPoolExecutorWithQueueSizeLimit(maxsize=max_parallel, max_workers=max_parallel) as executor: future_to_app_name = { executor.submit( executeThread, app_name, spark_submit_cmd, error_log_dir, ): app_name for app_name, spark_submit_cmd in dict_spark_submit_cmds.items() if dict_success_app[app_name] == False } for future in concurrent.futures\\ .as_completed(future_to_app_name): app_name = future_to_app_name[future] try: dict_success_app[app_name] = future.result() except Exception as exc: print('Subordinate task %s generated exception %s' % (app_name, exc)) raise return dict_success_app"
},
{
"code": null,
"e": 37588,
"s": 37109,
"text": "In this example, I will show how to use the framework above to parallelise training of a multi-label classifier with hundreds of labels. Basically, we will train multiple binary classifiers in parallel, where the training of each binary model is itself parallelised via Spark. The individual binary classifiers are Gradient Boosting models trained using the Spark version of the popular LightGBM package, contained in the Microsoft Machine Learning for Spark (MMLSpark) library."
},
{
"code": null,
"e": 37688,
"s": 37588,
"text": "By using the framework above, there are only two other things that the controller task needs to do:"
},
{
"code": null,
"e": 37959,
"s": 37688,
"text": "Prior to calling the executeAllThreads function, set up the application name and spark-submit command for each subordinate task;After returning from the executeAllThreads function, check which subordinate tasks have been successful and handle their output appropriately."
},
{
"code": null,
"e": 38088,
"s": 37959,
"text": "Prior to calling the executeAllThreads function, set up the application name and spark-submit command for each subordinate task;"
},
{
"code": null,
"e": 38231,
"s": 38088,
"text": "After returning from the executeAllThreads function, check which subordinate tasks have been successful and handle their output appropriately."
},
{
"code": null,
"e": 38353,
"s": 38231,
"text": "For the first part, we can start by looking at our previous example where we are submitting a standalone subordinate job:"
},
{
"code": null,
"e": 39039,
"s": 38353,
"text": "spark_config_cluster_path = \"/home/edsonaoki/spark_config_cluster\"app_name = \"some_model_training\"spark_config = { \"spark.jars.packages\" : \"com.microsoft.ml.spark:mmlspark_2.11:0.18.1\", \"spark.dynamicAllocation.enabled\": \"false\", \"spark.executor.instances\": \"10\", \"spark.yarn.dist.files\": \"/home/edsonaoki/custom_packages.tar\"}command = \"lightgbm_training.py \"\\ \"hdfs://user/edsonaoki/datasets/input_data.parquet\"\\ \"hdfs://user/edsonaoki/models\"spark_submit_cmd = \"SPARK_CONF_DIR=%s spark-submit -name %s %s %s\" % (spark_config_cluster_path, app_name, \" \".join(['-conf %s=\"%s\"' % (key, value) for key, value in spark_config.items()]), command)"
},
{
"code": null,
"e": 39402,
"s": 39039,
"text": "What do we need to adapt the code for multi-label classification? First, for the reasons already mentioned, the application name needs to be completely unique. Assuming that the label columns of the dataset input_data.parquet are contained in a variable lst_labels, one way to ensure likely unique applications IDs for each subordinate task would something like:"
},
{
"code": null,
"e": 39550,
"s": 39402,
"text": "import timecurr_timestamp = int(time.time()*1000)app_names = [\"model_training_%s_%d\" % (label,curr_timestamp) for label in lst_labels]"
},
{
"code": null,
"e": 39848,
"s": 39550,
"text": "This ensures that application names will be unique as long as the controller task is not started more once in the same millisecond (of course, if we have a shared YARN cluster other adaptions may be needed to make the application names unique, such as adding the username to the application name)."
},
{
"code": null,
"e": 39968,
"s": 39848,
"text": "We are yet to discuss how the subordinate task code contained in lightgbm_training.py looks like, but letβs suppose it:"
},
{
"code": null,
"e": 40168,
"s": 39968,
"text": "Performs some pre-processing on the training data, based on the label column (such as dataset balancing), using a function contained in the custom_packages.tar file submitted along with the Spark job"
},
{
"code": null,
"e": 40235,
"s": 40168,
"text": "Trains the model based on the features column and the label column"
},
{
"code": null,
"e": 40278,
"s": 40235,
"text": "Saves the trained model in the HDFS system"
},
{
"code": null,
"e": 40545,
"s": 40278,
"text": "In this case, the controller task needs to pass the HDFS path of the training dataset, the HDFS path to store the trained models, and the label to be used for each subordinate task, via command-line arguments to lightgbm_training.py. This can be done as shown below:"
},
{
"code": null,
"e": 41085,
"s": 40545,
"text": "dict_spark_submit_cmds = dict()for i in range(len(lst_labels)): command = \"lightgbm_training.py \"\\ \"hdfs://user/edsonaoki/datasets/input_data.parquet \"\\ \"hdfs://user/edsonaoki/models \"\\ +lst_labels[i] spark_submit_cmd = βSPARK_CONF_DIR=%s spark-submit -name %s \"\\ \"%s %s\" % (spark_config_cluster_path, app_names[i], \" \".join(['-conf %s=\"%s\"' % (key, value) for key, value in spark_config.items()]), command) dict_spark_submit_cmds[app_names[i]] = spark_submit_cmd"
},
{
"code": null,
"e": 41451,
"s": 41085,
"text": "Of course, there are many other ways to customise the subordinate tasks. We might want to use different model training hyperparameters, different datasets, different Spark configurations, or even use different Python scripts for each subordinate task. The fact that we allow the spark-submit command line to be unique for each subtask allows complete customisation."
},
{
"code": null,
"e": 41569,
"s": 41451,
"text": "For the readerβs convenience, I put together the controller taskβs code prior to and until calling executeAllThreads:"
},
{
"code": null,
"e": 42636,
"s": 41569,
"text": "import timespark_config_cluster_path = \"/home/edsonaoki/spark_config_cluster\"curr_timestamp = int(time.time()*1000)app_names = [\"model_training_%s_%d\" % (label,curr_timestamp) for label in lst_labels]spark_config = { \"spark.jars.packages\" : \"com.microsoft.ml.spark:mmlspark_2.11:0.18.1\", \"spark.dynamicAllocation.enabled\": \"false\", \"spark.executor.instances\": \"10\", \"spark.yarn.dist.files\": \"/home/edsonaoki/custom_packages.tar\"}dict_spark_submit_cmds = dict()for i in range(len(lst_labels)): command = \"lightgbm_training.py \"\\ \"hdfs://user/edsonaoki/datasets/input_data.parquet \"\\ \"hdfs://user/edsonaoki/models \"\\ +lst_labels[i] spark_submit_cmd = βSPARK_CONF_DIR=%s spark-submit -name %s \"\\ \"%s %s\" % (spark_config_cluster_path, app_names[i], \" \".join(['-conf %s=\"%s\"' % (key, value) for key, value in spark_config.items()]), command) dict_spark_submit_cmds[app_names[i]] = spark_submit_cmdexecuteAllThreads(dict_spark_submit_cmds, \"/home/edsonaoki/logs\")"
},
{
"code": null,
"e": 43009,
"s": 42636,
"text": "For the second part, i.e. what the controller task should do after returning from executeAllThreads, assuming that the successful tasks have saved the trained models in the HDFS system, we can just open these files and process them as appropriate, for instance applying the models to some appropriate validation dataset, generating plots and computing performance metrics."
},
{
"code": null,
"e": 43535,
"s": 43009,
"text": "If we use the parallelisation framework presented earlier as it is, there wonβt be βunsuccessful subordinate tasksβ as any failure will result in an exception being raised. But if we modified executeThread to return False in case of task failure, we might store the returning dict_success_app dictionary in a JSON or Pickle file such that we can later investigate and fix the failed tasks. Finally, we can call again executeAllThreads with the optional argument dict_success_app set such that we re-run only the failed tasks."
},
{
"code": null,
"e": 43807,
"s": 43535,
"text": "Let us now write the code of the subordinate task in the lightgbm_training.py script. The first step is to read the input arguments of the script, i.e. the path of the training dataset in the HDFS filesystem, the path to store the models and the name of the label column:"
},
{
"code": null,
"e": 43890,
"s": 43807,
"text": "import systrain_data_path = sys.argv[1]model_path = sys.argv[2]label = sys.argv[3]"
},
{
"code": null,
"e": 43999,
"s": 43890,
"text": "Since we are using the Spark version of LightGBM, we need to create a Spark session, which we do as follows:"
},
{
"code": null,
"e": 44131,
"s": 43999,
"text": "from pyspark.sql import SparkSessionspark = SparkSession.builder.getOrCreate()spark.sparkContext.addPyFile(\"./custom_packages.tar\")"
},
{
"code": null,
"e": 44580,
"s": 44131,
"text": "Note that there is no need to set up any configuration for the Spark session, as it has been already done in the command line submitted by the controller task. Also, since we explicitly submitted a custom Python package custom_packages.tar to the Spark job, we need to use the addPyFile function to make the contents of the package usable inside our code, as the package is not included in the PYTHONPATH environment variable of the Hadoop cluster."
},
{
"code": null,
"e": 44937,
"s": 44580,
"text": "The code that does the actual processing in the subordinate task is pretty straightforward. The subordinate task will read the training data, call some pre-processing function inside custom_packages.tar (say custom_data_preprocessing.datasetBalancing), perform the model training, and save the trained model with a unique name back in the HDFS file system:"
},
{
"code": null,
"e": 45516,
"s": 44937,
"text": "from custom_data_preprocessing import datasetBalancingfrom mmlspark import LightGBMClassifierdf_train_data = spark.read.parquet(train_data_path)df_preproc_data = datasetBalancing(df_train_data, label)untrained_model = LightGBMClassifier(learningRate=0.3, numIterations=150, numLeaves=45)\\ .setFeaturesCol(\"features\")\\ .setLabelCol(label)trained_model = untrained_model.fit(df_preproc_data)trained_model.write().overwrite()\\ .save(model_path + \"/trained_model_%s.mdl\" % label)spark.stop()"
},
{
"code": null,
"e": 45606,
"s": 45516,
"text": "The full code of lightgbm_training.py is put together below for the readerβs convenience:"
},
{
"code": null,
"e": 46398,
"s": 45606,
"text": "import systrain_data_path = sys.argv[1]model_path = sys.argv[2]label = sys.argv[3]from pyspark.sql import SparkSessionspark = SparkSession.builder.getOrCreate()spark.sparkContext.addPyFile(\"./custom_packages.tar\")from custom_data_preprocessing import datasetBalancingfrom mmlspark import LightGBMClassifierdf_train_data = spark.read.parquet(train_data_path)df_preproc_data = datasetBalancing(df_train_data, label)untrained_model = LightGBMClassifier(learningRate=0.3, numIterations=150, numLeaves=45)\\ .setFeaturesCol(\"features\")\\ .setLabelCol(label)trained_model = untrained_model.fit(df_preproc_data)trained_model.write().overwrite()\\ .save(model_path + \"/trained_model_%s.mdl\" % label)spark.stop()"
},
{
"code": null,
"e": 46835,
"s": 46398,
"text": "It is easy to see that the framework presented in this article can be re-used for various tasks other than multiple machine learning model training. A question is that may arise is whether it can be used for different cluster environments, for instance with Spark on Mesos rather than Spark on YARN. I believe so, but some adaptations are needed as the presented code relies heavily on the yarn command to monitor the subordinate tasks."
},
{
"code": null,
"e": 47256,
"s": 46835,
"text": "By using this framework, data scientists can focus more of their time on designing the data tasks, not on manually executing them for dozens or hundreds of small variations. Another advantage is that by harnessing parallelisation, the tasks can be done in much less time, or from a different perspective, without requiring multiple data scientists to work simultaneously to complete the tasks in the same amount of time."
}
] |
A simple NLP application for ambiguity resolution | by Laura Gorrieri | Towards Data Science
|
Ambiguity is one of the biggest challenges in NLP. When trying to understand the meaning of a word we consider several different aspects, such as the context in which it is used, our own knowledge of the world, and how a given word is generally used in society. Words change meaning over time and can also mean one thing in a certain domain and another in a different one. This phenomenon can be observed in homographs — two words that happen to be written in the same way, usually coming from different etymologies — and polysemy — one word that carries different meanings. In this tutorial, we'll see how to resolve ambiguity in PoS tagging and semantic tagging, using expert.ai technology.
Please check how to install expert.ai NL API python SDK, either on this Towards Data Science article or on the official documentation, here.
Language is ambiguous: not only a sentence could be written in different ways and still convey the same meaning, but even lemmas β a concept that is supposed to be far less ambiguous β can carry different meanings.
For example, the word play could refer to several different things. Letβs take a look at the following examples:I really enjoyed the play.Iβm in a band and I play the guitar.
Not only the same word can have different meanings, but it can be used in different roles: in the first sentence, play is a noun, while in the second itβs a verb. Assigning the correct grammatical label to each token is called PoS (Part of Speech) tagging and itβs not a piece of cake.
Letβs see how to resolve PoS ambiguity with expert.ai β first, letβs import the library and create the client:
Weβll see the PoS tagging for two sentences β notice how the lemma key is the same in both sentences, while its PoS changes:
To analyze each sentence we need to create a request to NL API: the most important parameters β shown in the code below as well β are the text to analyze, the language, and the analysis we are requesting, represented by the resource parameter.Please notice that expert.ai NL API currently supports five languages (en, it, es, fr, de). The resource we use is disambiguation, which performs multi-level tagging as the product of the expert.ai NLP pipeline.Without further ado, letβs create our first request:
Now we need to iterate over the PoS of the text and check which one was assigned to the lemma key:
Part of speech for "The key broke in the lock."The POS: DETkey POS: NOUNbroke in POS: VERBthe POS: DETlock POS: NOUN. POS: PUNCT
What is printed above, is a list of PoS following UD Labels, where NOUN indicates that the lemma key is here used as a noun. This should not be the case for its homograph that we see in the second sentence, in which key is used as an adjective:
Part of speech for "The key problem was not one of quality but of quantity."The POS: DETkey POS: ADJproblem POS: NOUNwas POS: AUXnot POS: PARTone POS: NUMof POS: ADPquality POS: NOUNbut POS: CCONJof POS: ADPquantity POS: NOUN. POS: PUNCT
As you can see printed above, the lemma key was correctly recognized as an adjective in this sentence.
One word can also have the same grammatical label and have different meanings. This phenomenon is called polysemy. Being able to infer the correct meaning for each word is to perform semantic tagging.
Words that are more common tend to have more meanings that have been added to them in time. For example, the lemma paper can have multiple meanings, as seen here:I like to take notes on paper.Every morning my husband reads the news from the local paper.
Pointing out the correct meaning of every single lemma is an important task, as one document could change meaning or focus based on that. To do so, we must rely on technology that is well developed and robust, since semantic tagging heavily depends on many pieces of information that come from the text.
For semantic tagging IDs are often used: these IDs are identifiers of concepts, and each concept will have its own ID. For the same lemma, e.g. paper, we will have a certain id x for its meaning as a material, and another y for its meaning as a newspaper.These IDs are usually stored in a Knowledge Graph, that is a graph in which each node is a concept and the arches are the connections between concepts that follow a certain logic (e.g. an arch could link two concepts if one is the hyponym of the other).Letβs now look at how expert.ai performs semantic tagging. We begin by choosing the sentences from which we will compare the two lemmas solution:
And now the request for the first sentence β using the same parameters as the previous example:
Semantic information is found in the syncon attribute for each token: a syncon is a concept, that is stored in expert.aiβs Knowledge Graph; each concept is formed by one or more lemmas, which are synonyms.Letβs see how the information is presented in the document object:
Semantic tagging for "Work out the solution in your head."Work out CONCEPT_ID: 63784the CONCEPT_ID: -1solution CONCEPT_ID: 25789in CONCEPT_ID: -1your CONCEPT_ID: -1head CONCEPT_ID: 104906. CONCEPT_ID: -1
Each token has its own syncon, whereas some of them present -1 as concept id: this is the default ID assigned to tokens that do not have any concept, such as punctuation or articles.So, if for the previous sentence we obtain concept id 25789 for the lemma solution, for the second sentence we should obtain another one since the two lemmas have a different meaning in the two sentences:
Semantic tagging for "Heat the chlorine solution to 75Β° Celsius."Heat CONCEPT_ID: 64278the CONCEPT_ID: -1chlorine CONCEPT_ID: 59954solution CONCEPT_ID: 59795to CONCEPT_ID: -175 CONCEPT_ID: -1Β° Celsius CONCEPT_ID: 56389. CONCEPT_ID: -1
As expected, the lemma solution corresponds to a different concept id, indicating that the lemma used has a different meaning from the previous sentence.
Please find this article as a notebook on GitHub.
NLP is hard because language is ambiguous: one word, one phrase, or one sentence can mean different things depending on the context. With technologies such as expert.ai, we can solve ambiguity and build solutions that are more accurate when dealing with the meaning of words.
|
[
{
"code": null,
"e": 864,
"s": 172,
"text": "Ambiguity is one of the biggest challenges in NLP. When trying to understand the meaning of a word we consider several different aspects, such as the context in which it is used, our own knowledge of the world, and how a given word is generally used in society. Words change meaning over time and can also mean one thing in a certain domain and another in a different one. This phenomenon can be observed in homographs β two words that happen to be written in the same way, usually coming from different etymologies β and polysemy β one word that carries different meanings.In this tutorial, weβll see how to resolve ambiguity in PoS tagging and semantic tagging, using expert.ai technology."
},
{
"code": null,
"e": 1005,
"s": 864,
"text": "Please check how to install expert.ai NL API python SDK, either on this Towards Data Science article or on the official documentation, here."
},
{
"code": null,
"e": 1220,
"s": 1005,
"text": "Language is ambiguous: not only a sentence could be written in different ways and still convey the same meaning, but even lemmas β a concept that is supposed to be far less ambiguous β can carry different meanings."
},
{
"code": null,
"e": 1395,
"s": 1220,
"text": "For example, the word play could refer to several different things. Letβs take a look at the following examples:I really enjoyed the play.Iβm in a band and I play the guitar."
},
{
"code": null,
"e": 1681,
"s": 1395,
"text": "Not only the same word can have different meanings, but it can be used in different roles: in the first sentence, play is a noun, while in the second itβs a verb. Assigning the correct grammatical label to each token is called PoS (Part of Speech) tagging and itβs not a piece of cake."
},
{
"code": null,
"e": 1792,
"s": 1681,
"text": "Letβs see how to resolve PoS ambiguity with expert.ai β first, letβs import the library and create the client:"
},
{
"code": null,
"e": 1917,
"s": 1792,
"text": "Weβll see the PoS tagging for two sentences β notice how the lemma key is the same in both sentences, while its PoS changes:"
},
{
"code": null,
"e": 2424,
"s": 1917,
"text": "To analyze each sentence we need to create a request to NL API: the most important parameters β shown in the code below as well β are the text to analyze, the language, and the analysis we are requesting, represented by the resource parameter.Please notice that expert.ai NL API currently supports five languages (en, it, es, fr, de). The resource we use is disambiguation, which performs multi-level tagging as the product of the expert.ai NLP pipeline.Without further ado, letβs create our first request:"
},
{
"code": null,
"e": 2523,
"s": 2424,
"text": "Now we need to iterate over the PoS of the text and check which one was assigned to the lemma key:"
},
{
"code": null,
"e": 2720,
"s": 2523,
"text": "Part of speech for \"The key broke in the lock.\"The \tPOS: DETkey \tPOS: NOUNbroke in \tPOS: VERBthe \tPOS: DETlock \tPOS: NOUN. \tPOS: PUNCT"
},
{
"code": null,
"e": 2965,
"s": 2720,
"text": "What is printed above, is a list of PoS following UD Labels, where NOUN indicates that the lemma key is here used as a noun. This should not be the case for its homograph that we see in the second sentence, in which key is used as an adjective:"
},
{
"code": null,
"e": 3338,
"s": 2965,
"text": "Part of speech for \"The key problem was not one of quality but of quantity.\"The \tPOS: DETkey \tPOS: ADJproblem \tPOS: NOUNwas \tPOS: AUXnot \tPOS: PARTone \tPOS: NUMof \tPOS: ADPquality \tPOS: NOUNbut \tPOS: CCONJof \tPOS: ADPquantity \tPOS: NOUN. \tPOS: PUNCT"
},
{
"code": null,
"e": 3441,
"s": 3338,
"text": "As you can see printed above, the lemma key was correctly recognized as an adjective in this sentence."
},
{
"code": null,
"e": 3642,
"s": 3441,
"text": "One word can also have the same grammatical label and have different meanings. This phenomenon is called polysemy. Being able to infer the correct meaning for each word is to perform semantic tagging."
},
{
"code": null,
"e": 3896,
"s": 3642,
"text": "Words that are more common tend to have more meanings that have been added to them in time. For example, the lemma paper can have multiple meanings, as seen here:I like to take notes on paper.Every morning my husband reads the news from the local paper."
},
{
"code": null,
"e": 4200,
"s": 3896,
"text": "Pointing out the correct meaning of every single lemma is an important task, as one document could change meaning or focus based on that. To do so, we must rely on technology that is well developed and robust, since semantic tagging heavily depends on many pieces of information that come from the text."
},
{
"code": null,
"e": 4854,
"s": 4200,
"text": "For semantic tagging IDs are often used: these IDs are identifiers of concepts, and each concept will have its own ID. For the same lemma, e.g. paper, we will have a certain id x for its meaning as a material, and another y for its meaning as a newspaper.These IDs are usually stored in a Knowledge Graph, that is a graph in which each node is a concept and the arches are the connections between concepts that follow a certain logic (e.g. an arch could link two concepts if one is the hyponym of the other).Letβs now look at how expert.ai performs semantic tagging. We begin by choosing the sentences from which we will compare the two lemmas solution:"
},
{
"code": null,
"e": 4950,
"s": 4854,
"text": "And now the request for the first sentence β using the same parameters as the previous example:"
},
{
"code": null,
"e": 5222,
"s": 4950,
"text": "Semantic information is found in the syncon attribute for each token: a syncon is a concept, that is stored in expert.aiβs Knowledge Graph; each concept is formed by one or more lemmas, which are synonyms.Letβs see how the information is presented in the document object:"
},
{
"code": null,
"e": 5501,
"s": 5222,
"text": "Semantic tagging for \"Work out the solution in your head.\"Work out \tCONCEPT_ID: 63784the \tCONCEPT_ID: -1solution \tCONCEPT_ID: 25789in \tCONCEPT_ID: -1your \tCONCEPT_ID: -1head \tCONCEPT_ID: 104906. \tCONCEPT_ID: -1"
},
{
"code": null,
"e": 5888,
"s": 5501,
"text": "Each token has its own syncon, whereas some of them present -1 as concept id: this is the default ID assigned to tokens that do not have any concept, such as punctuation or articles.So, if for the previous sentence we obtain concept id 25789 for the lemma solution, for the second sentence we should obtain another one since the two lemmas have a different meaning in the two sentences:"
},
{
"code": null,
"e": 6206,
"s": 5888,
"text": "Semantic tagging for \"Heat the chlorine solution to 75Β° Celsius.\"Heat \tCONCEPT_ID: 64278the \tCONCEPT_ID: -1chlorine \tCONCEPT_ID: 59954solution \tCONCEPT_ID: 59795to \tCONCEPT_ID: -175 \tCONCEPT_ID: -1Β° Celsius \tCONCEPT_ID: 56389. \tCONCEPT_ID: -1"
},
{
"code": null,
"e": 6360,
"s": 6206,
"text": "As expected, the lemma solution corresponds to a different concept id, indicating that the lemma used has a different meaning from the previous sentence."
},
{
"code": null,
"e": 6410,
"s": 6360,
"text": "Please find this article as a notebook on GitHub."
}
] |
Python Basic date and time types
|
To manipulate dates and times in Python there is a module called datetime. There are two types of date and time objects: naïve and aware.
A naïve object does not contain enough information to unambiguously locate itself relative to other date-time objects; whether it represents Coordinated Universal Time (UTC), local time, or some other timezone is left up to the program.
Aware objects, on the other hand, carry the information needed to account for algorithmic and political time adjustments, so they can represent a specific moment in time unambiguously.
To use this module, we should import it using β
import datetime
There are different classes, constants and methods in this module.
The constants are β
datetime.MINYEAR
It is the smallest year number allowed in a date or datetime object. The value is 1
datetime.MAXYEAR
It is the largest Year number, which can be applied as date or datetime objects. The value is 9999
The Available datatypes are β
date
It is date type object. It uses Gregorian calendar. It has year, month, day attributes.
time
It is a time object class. It is independent of any particular day. It has hour, minute, second, microsecond and tzinfo attributes.
datetime
It is a combined set of dates and times.
timedelta
It is used to express the difference between two date, time or datetime values, with microsecond resolution.
tzinfo
It is an Abstract Base Class. It holds the time zone information. It is used by the datetime and time classes.
timezone
In this class, it implements tzinfo. There is a fixed offset from the UTC
The date objects represent a date. A date has day, month and year parts. It uses the Gregorian calendar, according to which January 1 of year 1 is called day number 1, January 2 of year 1 is day number 2, and so on.
Some date related methods are β
This is the constructor to create a date type object. To create a date, all arguments are required as integer type data. The year must be in range MINYEAR & MAXYEAR. If the given date is not valid, it will raise ValueError.
This method is used to return the current local date.
This method is used to get the date from POSIX timestamp. If the timestamp value is out of range, it will raise OverflowError.
This method is used to get the date from proleptic Gregorian Calendar ordinal. It is used to get the date from the date count from January 1 of Year 1.
This method is used to return a date to proleptic Gregorian Calendar ordinal.
This method is used to return the date of a week as an integer from the date. The Monday is 0, Tuesday is 1 and so on.
This method is used to return the date as an ISO 8601 format string. The format is YYYY-MM-DD.
Live Demo
import datetime as dt
new_date = dt.date(1998, 9, 5) #Store date 5th September, 1998
print("The Date is: " + str(new_date))
print("Ordinal value of given date: " + str(new_date.toordinal()))
print("The weekday of the given date: " + str(new_date.weekday())) #Monday is 0
my_date = dt.date.fromordinal(732698) #Create a date from the Ordinal value.
print("The Date from ordinal is: " + str(my_date))
td = my_date - new_date #Create a timedelta object
print('td Type: ' + str(type(td)) + '\nDifference: ' + str(td))
The Date is: 1998-09-05
Ordinal value of given date: 729637
The weekday of the given date: 5
The Date from ordinal is: 2007-01-22
td Type: <class 'datetime.timedelta'>
Difference: 3061 days, 0:00:00
The time object represents a local time. A time has hour, minute, second, microsecond and tzinfo parts. The hour must be in the range 0 to 23, the minute and second in the range 0 to 59, and the microsecond in the range 0 to 999999.
Some time related methods are
This method is used to get time from an ISO 8601 string. It can take any of the output of time.isoformat() method.
This method is used to return a time by taking values from the arguments. If no argument is passed, it will return the same time object values.
This method is used to return the name of the time zone. If the tzinfo is None, it will return None.
The datetime object holds both date and time. As date object, it supports Gregorian Calendar and as Time object it holds exactly 3600*24 seconds for each day.
It supports all date and time related methods, some methods are also present for datetime. These are like β
This method is used to get the current date and time. If the tz is not present or None, then, it will return date like the today() method.
This method is used to get the current UTC date and time related information.
There are another two methods called strftime() and strptime(). These methods are applicable for both date and time objects, as well as datetime objects.
The method strftime() converts a tuple or struct_time representing a time as returned by gmtime() or localtime() to a string as specified by the format argument. If t is not provided, the current time as returned by localtime() is used. format must be a string. An exception ValueError is raised if any field in t is outside of the allowed range.
The method strptime() parses a string representing a time according to a format. The return value is a struct_time as returned by gmtime() or localtime(). The format parameter uses the same directives as those used by strftime(); it defaults to "%a %b %d %H:%M:%S %Y" which matches the formatting returned by ctime().
These two methods use some directives. Some of them are listed below β
%A
Full Weekday name
%B
Full Month Name
%d
Day of the month (0 to 31)
%S
Second
%G
4 digit Year, corresponding to ISO week number
%m
Month (1 to 12)
%M
Minute
%T
Current time, equal to %H:%M:%S
%W
Week number of the current year, starting with the first Monday as the first day of the first week
%w
Day of the week as a decimal, Sunday=0
%Y
Year including the century
%Z or %z
Time zone or name or abbreviation
Live Demo
import datetime as dt
my_date1 = dt.datetime(2015, 1, 4) #Storing the date 4th Jan, 2015
print(my_date1)
print('The Weekday of that day was: ' + my_date1.strftime('%A'))
my_date2 = dt.datetime.strptime('August-15-2017', '%B-%d-%Y') #Storing the date 15th Aug, 2017
print(my_date2)
print('The Weekday of that day was: ' + my_date2.strftime('%A'))
print('The difference between two days: ' + str(abs(my_date1 - my_date2)))
2015-01-04 00:00:00
The Weekday of that day was: Sunday
2017-08-15 00:00:00
The Weekday of that day was: Tuesday
The difference between two days: 954 days, 0:00:00
|
[
{
"code": null,
"e": 1223,
"s": 1062,
"text": "To manipulate dates and times in the python there is a module called datetime. There are two types of date and time objects. The types are naiΜve and the aware."
},
{
"code": null,
"e": 1400,
"s": 1223,
"text": "In the naiΜve object, there is no enough information to unambiguously locate this object from other date-time objects. In this approach it uses Coordinate Universal Time (UTC)."
},
{
"code": null,
"e": 1582,
"s": 1400,
"text": "In the aware type objects there are different information regarding algorithmic and political time adjustments. This type of objects is used to represent some specific time moments."
},
{
"code": null,
"e": 1630,
"s": 1582,
"text": "To use this module, we should import it using β"
},
{
"code": null,
"e": 1647,
"s": 1630,
"text": "import datetime\n"
},
{
"code": null,
"e": 1714,
"s": 1647,
"text": "There are different classes, constants and methods in this module."
},
{
"code": null,
"e": 1734,
"s": 1714,
"text": "The constants are β"
},
{
"code": null,
"e": 1751,
"s": 1734,
"text": "datetime.MINYEAR"
},
{
"code": null,
"e": 1848,
"s": 1751,
"text": "It is the smallest Year number, which can be applied as date or datetime objects. The value is 0"
},
{
"code": null,
"e": 1865,
"s": 1848,
"text": "datetime.MAXYEAR"
},
{
"code": null,
"e": 1964,
"s": 1865,
"text": "It is the largest Year number, which can be applied as date or datetime objects. The value is 9999"
},
{
"code": null,
"e": 1994,
"s": 1964,
"text": "The Available datatypes are β"
},
{
"code": null,
"e": 1999,
"s": 1994,
"text": "date"
},
{
"code": null,
"e": 2087,
"s": 1999,
"text": "It is date type object. It uses Gregorian calendar. It has year, month, day attributes."
},
{
"code": null,
"e": 2092,
"s": 2087,
"text": "time"
},
{
"code": null,
"e": 2224,
"s": 2092,
"text": "It is a time object class. It is independent of any particular day. It has hour, minute, second, microsecond and tzinfo attributes."
},
{
"code": null,
"e": 2233,
"s": 2224,
"text": "datetime"
},
{
"code": null,
"e": 2274,
"s": 2233,
"text": "It is a combined set of dates and times."
},
{
"code": null,
"e": 2284,
"s": 2274,
"text": "timedelta"
},
{
"code": null,
"e": 2380,
"s": 2284,
"text": "It is used to express the difference between two date, time or datetime values in milliseconds."
},
{
"code": null,
"e": 2387,
"s": 2380,
"text": "tzinfo"
},
{
"code": null,
"e": 2498,
"s": 2387,
"text": "It is an Abstract Base Class. It holds the time zone information. It is used by the datetime and time classes."
},
{
"code": null,
"e": 2507,
"s": 2498,
"text": "timezone"
},
{
"code": null,
"e": 2581,
"s": 2507,
"text": "In this class, it implements tzinfo. There is a fixed offset from the UTC"
},
{
"code": null,
"e": 2800,
"s": 2581,
"text": "The date objects represent a date. In the date there are Day, month and the Year part. It uses the Gregorian Calendar. According to this calendar the day of January 1 of Year 1 is called as the day number 1, and so on."
},
{
"code": null,
"e": 2832,
"s": 2800,
"text": "Some date related methods are β"
},
{
"code": null,
"e": 3056,
"s": 2832,
"text": "This is the constructor to create a date type object. To create a date, all arguments are required as integer type data. The year must be in range MINYEAR & MAXYEAR. If the given date is not valid, it will raise ValueError."
},
{
"code": null,
"e": 3110,
"s": 3056,
"text": "This method is used to return the current local date."
},
{
"code": null,
"e": 3237,
"s": 3110,
"text": "This method is used to get the date from POSIX timestamp. If the timestamp value is out of range, it will raise OverflowError."
},
{
"code": null,
"e": 3389,
"s": 3237,
"text": "This method is used to get the date from proleptic Gregorian Calendar ordinal. It is used to get the date from the date count from January 1 of Year 1."
},
{
"code": null,
"e": 3467,
"s": 3389,
"text": "This method is used to return a date to proleptic Gregorian Calendar ordinal."
},
{
"code": null,
"e": 3586,
"s": 3467,
"text": "This method is used to return the date of a week as an integer from the date. The Monday is 0, Tuesday is 1 and so on."
},
{
"code": null,
"e": 3681,
"s": 3586,
"text": "This method is used to return the date as an ISO 8601 format string. The format is YYYY-MM-DD."
},
{
"code": null,
"e": 3692,
"s": 3681,
"text": " Live Demo"
},
{
"code": null,
"e": 4207,
"s": 3692,
"text": "import datetime as dt\nnew_date = dt.date(1998, 9, 5) #Store date 5th septemberm, 1998\nprint(\"The Date is: \" + str(new_date))\nprint(\"Ordinal value of given date: \" + str(new_date.toordinal()))\nprint(\"The weekday of the given date: \" + str(new_date.weekday())) #Monday is 0\nmy_date = dt.date.fromordinal(732698) #Create a date from the Ordinal value.\nprint(\"The Date from ordinal is: \" + str(my_date))\ntd = my_date - new_date #Create a timedelta object\nprint('td Type: ' + str(type(td)) + '\\nDifference: ' + str(td))"
},
{
"code": null,
"e": 4407,
"s": 4207,
"text": "The Date is: 1998-09-05\nOrdinal value of given date: 729637\nThe weekday of the given date: 5\nThe Date from ordinal is: 2007-01-22\ntd Type: <class 'datetime.timedelta'>\nDifference: 3061 days, 0:00:00\n"
},
{
"code": null,
"e": 4651,
"s": 4407,
"text": "The time object represents a local time. In the time there are hour, minute second, microsecond, tzinfo part. The hour will be in range 0 to 24 and the minute and second will be in range 0 to 60, and microseconds will be in range 0 to 1000000."
},
{
"code": null,
"e": 4681,
"s": 4651,
"text": "Some time related methods are"
},
{
"code": null,
"e": 4796,
"s": 4681,
"text": "This method is used to get time from an ISO 8601 string. It can take any of the output of time.isoformat() method."
},
{
"code": null,
"e": 4940,
"s": 4796,
"text": "This method is used to return a time by taking values from the arguments. If no argument is passed, it will return the same time object values."
},
{
"code": null,
"e": 5041,
"s": 4940,
"text": "This method is used to return the name of the time zone. If the tzinfo is None, it will return None."
},
{
"code": null,
"e": 5200,
"s": 5041,
"text": "The datetime object holds both date and time. As date object, it supports Gregorian Calendar and as Time object it holds exactly 3600*24 seconds for each day."
},
{
"code": null,
"e": 5308,
"s": 5200,
"text": "It supports all date and time related methods, some methods are also present for datetime. These are like β"
},
{
"code": null,
"e": 5447,
"s": 5308,
"text": "This method is used to get the current date and time. If the tz is not present or None, then, it will return date like the today() method."
},
{
"code": null,
"e": 5525,
"s": 5447,
"text": "This method is used to get the current UTC date and time related information."
},
{
"code": null,
"e": 5679,
"s": 5525,
"text": "There are another two methods called strftime() and strptime(). These methods are applicable for both date and time objects, as well as datetime objects."
},
{
"code": null,
"e": 6026,
"s": 5679,
"text": "The method strftime() converts a tuple or struct_time representing a time as returned by gmtime() or localtime() to a string as specified by the format argument. If t is not provided, the current time as returned by localtime() is used. format must be a string. An exception ValueError is raised if any field in t is outside of the allowed range."
},
{
"code": null,
"e": 6344,
"s": 6026,
"text": "The method strptime() parses a string representing a time according to a format. The return value is a struct_time as returned by gmtime() or localtime(). The format parameter uses the same directives as those used by strftime(); it defaults to \"%a %b %d %H:%M:%S %Y\" which matches the formatting returned by ctime()."
},
{
"code": null,
"e": 6415,
"s": 6344,
"text": "These two methods use some directives. Some of them are listed below β"
},
{
"code": null,
"e": 6418,
"s": 6415,
"text": "%A"
},
{
"code": null,
"e": 6436,
"s": 6418,
"text": "Full Weekday name"
},
{
"code": null,
"e": 6439,
"s": 6436,
"text": "%B"
},
{
"code": null,
"e": 6455,
"s": 6439,
"text": "Full Month Name"
},
{
"code": null,
"e": 6458,
"s": 6455,
"text": "%d"
},
{
"code": null,
"e": 6485,
"s": 6458,
"text": "Day of the month (0 to 31)"
},
{
"code": null,
"e": 6488,
"s": 6485,
"text": "%S"
},
{
"code": null,
"e": 6495,
"s": 6488,
"text": "Second"
},
{
"code": null,
"e": 6498,
"s": 6495,
"text": "%G"
},
{
"code": null,
"e": 6545,
"s": 6498,
"text": "4 digit Year, corresponding to ISO week number"
},
{
"code": null,
"e": 6548,
"s": 6545,
"text": "%m"
},
{
"code": null,
"e": 6564,
"s": 6548,
"text": "Month (1 to 12)"
},
{
"code": null,
"e": 6567,
"s": 6564,
"text": "%M"
},
{
"code": null,
"e": 6574,
"s": 6567,
"text": "Minute"
},
{
"code": null,
"e": 6577,
"s": 6574,
"text": "%T"
},
{
"code": null,
"e": 6609,
"s": 6577,
"text": "Current time, equal to %H:%M:%S"
},
{
"code": null,
"e": 6612,
"s": 6609,
"text": "%W"
},
{
"code": null,
"e": 6711,
"s": 6612,
"text": "Week number of the current year, starting with the first Monday as the first day of the first week"
},
{
"code": null,
"e": 6714,
"s": 6711,
"text": "%w"
},
{
"code": null,
"e": 6753,
"s": 6714,
"text": "Day of the week as a decimal, Sunday=0"
},
{
"code": null,
"e": 6756,
"s": 6753,
"text": "%Y"
},
{
"code": null,
"e": 6783,
"s": 6756,
"text": "Year including the century"
},
{
"code": null,
"e": 6792,
"s": 6783,
"text": "%Z or %z"
},
{
"code": null,
"e": 6826,
"s": 6792,
"text": "Time zone or name or abbreviation"
},
{
"code": null,
"e": 6837,
"s": 6826,
"text": " Live Demo"
},
{
"code": null,
"e": 7258,
"s": 6837,
"text": "import datetime as dt\nmy_date1 = dt.datetime(2015, 1, 4) #Storing the date 4th Jan, 2015\nprint(my_date1)\nprint('The Weekday of that day was: ' + my_date1.strftime('%A'))\nmy_date2 = dt.datetime.strptime('August-15-2017', '%B-%d-%Y') #Storing the date 15th Aug, 2017\nprint(my_date2)\nprint('The Weekday of that day was: ' + my_date2.strftime('%A'))\nprint('The difference between two days: ' + str(abs(my_date1 - my_date2)))"
},
{
"code": null,
"e": 7423,
"s": 7258,
"text": "2015-01-04 00:00:00\nThe Weekday of that day was: Sunday\n2017-08-15 00:00:00\nThe Weekday of that day was: Tuesday\nThe difference between two days: 954 days, 0:00:00\n"
}
] |
Visualising Assembly Graphs. Visualising Assembly Graphs for... | by Vijini Mallawaarachchi | Towards Data Science
|
I have been working with assembly graphs of metagenomes for a while and I have come across many fascinating things. In this article, I will share with you some of my observations related to metagenomic assembly graphs and binning contigs obtained from those graphs. Assuming that you have a basic understanding about genome assembly (if not, you can read my previous article Genome Assembly — The Holy Grail of Genome Analysis), let's get started.
An assembly graph is used to represent the final assembly of a genome (or metagenomes). In simple terms, the assembler builds this assembly graph based on reads and their overlap information. Finally, the assembler resolves paths across the assembly graph and outputs non-branching paths as contigs.
Given below is part of the visualisation of an assembly graph obtained from the tool named Bandage.
The most common file format used to represent assembly graphs is the GFA (Graph Fragment Assembly) format. A GFA file consists of a set of sequences and tab-delimited pairs of sequences with their overlaps. You can read more about this format from http://gfa-spec.github.io/GFA-spec/GFA1.html.
As shown in Figure 2, sequences are denoted starting from βSβ and overlaps (or links) between sequences are denoted starting from βLβ. The plus (+) and minus (-) signs denote whether the original sequence or its reverse complement is considered in the overlap. The value denoted with the letter βMβ in a link refers to the overlap length. In this sample file, the overlap length is 55 base pairs.
Since the assembly graph we are talking about is already a βgraphβ, we can model sequences to be vertices and overlaps/links to be edges.
Vertices β Sequences
Edges β Overlaps between sequences
Now let us visualise a sample assembly graph using python-igraph. You can read more about visualising graph data using python-igraph from my previous article. For the ease of explanation, I will not consider the plus and minus signs in the link information. In simple terms, I will be visualising an undirected graph.
We will consider a dataset consisting of reads from two bacterial species; E. faecalis and S. aureus. We will refer to this as the ES dataset. I have already assembled this dataset using metaSPAdes assembler to obtain contigs. This contig dataset known as ES+metaSPAdes, can be found from the link here as contigs.fasta. The assembly graph file can be found as assembly_graph_with_scaffolds.gfa.
Note: metaSPAdes represents each contig as a set of segments and the assembly graph file contains details about these segments and links between these segments. Hence, when obtaining the links between the contigs, you will have to scan the prefix and suffix for each contig in the contigs.paths file and the assembly_graph_with_scaffolds.gfa file to determine the prefix or suffix of the overlapping contig.
The visualisation of the ES+metaSPAdes dataset will look as follows.
We can align the contigs to reference genomes to determine to which genome each contig belong to. For this, we can use BWA-MEM. Given below is a sample command to run BWA-MEM.
bwa mem <path to reference genome> <path to contig file> > <output path>alignment.sam
For each contig, the reference genome resulting in the longest alignment length can be considered the contig's source.
After determining the ground truth of the contigs, we can label the vertices (contigs) and visualise this data as shown in Figure 4.
As illustrated in Figure 5, we can see that the contigs from the two species tend to form two separate regions in the assembly graph.
Moreover, it is highly likely that contigs belonging to the same species tend to have overlaps among each other, apart from the boundary cases. We can make use of these data during binning analysis.
Here are some code snippets used to produce the images I have presented in this article.
import refrom igraph import *from collections import defaultdictpaths = {}segment_contigs = {}node_count = 0# Get contig paths from contigs.pathswith open(<path to metaSPAdes contigs.paths file>) as file: name = file.readline() path = file.readline() while name != "" and path != "": while ";" in path: path = path[:-2]+","+file.readline() start = 'NODE_' end = '_length_' contig_num = str(int(re.search('%s(.*)%s' % (start, end), name).group(1))-1) segments = path.rstrip().split(",") if contig_num not in paths: node_count += 1 paths[contig_num] = [segments[0], segments[-1]] for segment in segments: if segment not in segment_contigs: segment_contigs[segment] = set([contig_num]) else: segment_contigs[segment].add(contig_num) name = file.readline() path = file.readline()links = []links_map = defaultdict(set)# Get contig paths from contigs.pathswith open(<path to metaSPAdes GFA file>) as file: line = file.readline() while line != "": # Identify lines with link information if "L" in line: strings = line.split("\t") f1, f2 = strings[1]+strings[2], strings[3]+strings[4] links_map[f1].add(f2) links_map[f2].add(f1) links.append(strings[1]+strings[2]+" "+strings[3]+strings[4]) line = file.readline()# Create graphg = Graph()# Add verticesg.add_vertices(node_count)for i in range(len(g.vs)): g.vs[i]["id"]= i g.vs[i]["label"]= str(i+1)for i in range(len(paths)): segments = paths[str(i)] start = segments[0] start_rev = "" if start.endswith("+"): start_rev = start[:-1]+"-" else: start_rev = start[:-1]+"+" end = segments[1] end_rev = "" if end.endswith("+"): end_rev = end[:-1]+"-" else: end_rev = end[:-1]+"+" new_links = [] if start in links_map: new_links.extend(list(links_map[start])) if start_rev in links_map: new_links.extend(list(links_map[start_rev])) if end in links_map: new_links.extend(list(links_map[end])) if end_rev in links_map: new_links.extend(list(links_map[end_rev])) for new_link in new_links: if new_link in segment_contigs: for contig in segment_contigs[new_link]: if 
i!=int(contig): g.add_edge(i,int(contig)) g.simplify(multiple=True, loops=False, combine_edges=None)
out_fig_name = "assembly_graph.png"visual_style = {}# Set bbox and marginvisual_style["bbox"] = (1500,1500)visual_style["margin"] = 30# Set vertex coloursvisual_style["vertex_color"] = 'white'# Set vertex sizevisual_style["vertex_size"] = 35# Set vertex lable sizevisual_style["vertex_label_size"] = 15# Don't curve the edgesvisual_style["edge_curved"] = False# Set the layoutmy_layout = g.layout_fruchterman_reingold()visual_style["layout"] = my_layout# Plot the graphplot(g, out_fig_name, **visual_style)
node_colours = []for i in range(node_count): if i in efaecalis_list: node_colours.append("red") elif i in saureus_list: node_colours.append("green") else: node_colours.append("white")out_fig_name = "coloured_assembly_graph.png"g.vs["color"] = node_coloursvisual_style = {}# Set bbox and marginvisual_style["bbox"] = (1500,1500)visual_style["margin"] = 30# Set vertex sizevisual_style["vertex_size"] = 35# Set vertex lable sizevisual_style["vertex_label_size"] = 15# Don't curve the edgesvisual_style["edge_curved"] = False# Set the layoutvisual_style["layout"] = my_layout# Plot the graphplot(g, out_fig_name, **visual_style)
My lab and I have developed a tool named GraphBin to refine binned contigs by making use of assembly graphs and the connectivity information between contigs. You can find the GitHub repo from here.
github.com
GraphBin is published in the OUP Bioinformatics journal. You can have a look at the publication for more information about the tool from DOI: 10.1093/bioinformatics/btaa180.
dx.doi.org
You can also read more on genome assembly and metagenomics from my previous articles listed below.
towardsdatascience.com
towardsdatascience.com
Hope you found my findings interesting. I would love to hear your thoughts.
Thank you for reading.
|
[
{
"code": null,
"e": 619,
"s": 172,
"text": "I have been working with assembly graphs of metagenomes for a while and I have come across many fascinating things. In this article, I will share with you some of my observations related to metagenomic assembly graphs and binning contigs obtained from those graphs. Assuming that you have a basic understanding about genome assembly (if not you can read my previous article Genome Assembly β The Holy Grail of Genome Analysis), letβs get started."
},
{
"code": null,
"e": 919,
"s": 619,
"text": "An assembly graph is used to represent the final assembly of a genome (or metagenomes). In simple terms, the assembler builds this assembly graph based on reads and their overlap information. Finally, the assembler resolves paths across the assembly graph and outputs non-branching paths as contigs."
},
{
"code": null,
"e": 1019,
"s": 919,
"text": "Given below is part of the visualisation of an assembly graph obtained from the tool named Bandage."
},
{
"code": null,
"e": 1313,
"s": 1019,
"text": "The most common file format used to represent assembly graphs is the GFA (Graph Fragment Assembly) format. A GFA file consists of a set of sequences and tab-delimited pairs of sequences with their overlaps. You can read more about this format from http://gfa-spec.github.io/GFA-spec/GFA1.html."
},
{
"code": null,
"e": 1710,
"s": 1313,
"text": "As shown in Figure 2, sequences are denoted starting from βSβ and overlaps (or links) between sequences are denoted starting from βLβ. The plus (+) and minus (-) signs denote whether the original sequence or its reverse complement is considered in the overlap. The value denoted with the letter βMβ in a link refers to the overlap length. In this sample file, the overlap length is 55 base pairs."
},
{
"code": null,
"e": 1848,
"s": 1710,
"text": "Since the assembly graph we are talking about is already a βgraphβ, we can model sequences to be vertices and overlaps/links to be edges."
},
{
"code": null,
"e": 1869,
"s": 1848,
"text": "Vertices β Sequences"
},
{
"code": null,
"e": 1904,
"s": 1869,
"text": "Edges β Overlaps between sequences"
},
{
"code": null,
"e": 2222,
"s": 1904,
"text": "Now let us visualise a sample assembly graph using python-igraph. You can read more about visualising graph data using python-igraph from my previous article. For the ease of explanation, I will not consider the plus and minus signs in the link information. In simple terms, I will be visualising an undirected graph."
},
{
"code": null,
"e": 2618,
"s": 2222,
"text": "We will consider a dataset consisting of reads from two bacterial species; E. faecalis and S. aureus. We will refer to this as the ES dataset. I have already assembled this dataset using metaSPAdes assembler to obtain contigs. This contig dataset known as ES+metaSPAdes, can be found from the link here as contigs.fasta. The assembly graph file can be found as assembly_graph_with_scaffolds.gfa."
},
{
"code": null,
"e": 3026,
"s": 2618,
"text": "Note: metaSPAdes represents each contig as a set of segments and the assembly graph file contains details about these segments and links between these segments. Hence, when obtaining the links between the contigs, you will have to scan the prefix and suffix for each contig in the contigs.paths file and the assembly_graph_with_scaffolds.gfa file to determine the prefix or suffix of the overlapping contig."
},
{
"code": null,
"e": 3095,
"s": 3026,
"text": "The visualisation of the ES+metaSPAdes dataset will look as follows."
},
{
"code": null,
"e": 3271,
"s": 3095,
"text": "We can align the contigs to reference genomes to determine to which genome each contig belong to. For this, we can use BWA-MEM. Given below is a sample command to run BWA-MEM."
},
{
"code": null,
"e": 3357,
"s": 3271,
"text": "bwa mem <path to reference genome> <path to contig file> > <output path>alignment.sam"
},
{
"code": null,
"e": 3479,
"s": 3357,
"text": "For each contig, the reference genome resulting in the longest alignment length can be considered as the contig's source."
},
{
"code": null,
"e": 3612,
"s": 3479,
"text": "After determining the ground truth of the contigs, we can label the vertices (contigs) and visualise this data as shown in Figure 4."
},
{
"code": null,
"e": 3746,
"s": 3612,
"text": "As illustrated in Figure 5, we can see that the contigs from the two species tend to form two separate regions in the assembly graph."
},
{
"code": null,
"e": 3945,
"s": 3746,
"text": "Moreover, it is highly likely that contigs belonging to the same species tend to have overlaps among each other, apart from the boundary cases. We can make use of these data during binning analysis."
},
{
"code": null,
"e": 4034,
"s": 3945,
"text": "Here are some code snippets used to produce the images I have presented in this article."
},
{
"code": null,
"e": 6635,
"s": 4034,
"text": "import refrom igraph import *from collections import defaultdictpaths = {}segment_contigs = {}node_count = 0# Get contig paths from contigs.pathswith open(<path to metaSPAdes contigs.paths file>) as file: name = file.readline() path = file.readline() while name != \"\" and path != \"\": while \";\" in path: path = path[:-2]+\",\"+file.readline() start = 'NODE_' end = '_length_' contig_num = str(int(re.search('%s(.*)%s' % (start, end), name).group(1))-1) segments = path.rstrip().split(\",\") if contig_num not in paths: node_count += 1 paths[contig_num] = [segments[0], segments[-1]] for segment in segments: if segment not in segment_contigs: segment_contigs[segment] = set([contig_num]) else: segment_contigs[segment].add(contig_num) name = file.readline() path = file.readline()links = []links_map = defaultdict(set)# Get contig paths from contigs.pathswith open(<path to metaSPAdes GFA file>) as file: line = file.readline() while line != \"\": # Identify lines with link information if \"L\" in line: strings = line.split(\"\\t\") f1, f2 = strings[1]+strings[2], strings[3]+strings[4] links_map[f1].add(f2) links_map[f2].add(f1) links.append(strings[1]+strings[2]+\" \"+strings[3]+strings[4]) line = file.readline()# Create graphg = Graph()# Add verticesg.add_vertices(node_count)for i in range(len(g.vs)): g.vs[i][\"id\"]= i g.vs[i][\"label\"]= str(i+1)for i in range(len(paths)): segments = paths[str(i)] start = segments[0] start_rev = \"\" if start.endswith(\"+\"): start_rev = start[:-1]+\"-\" else: start_rev = start[:-1]+\"+\" end = segments[1] end_rev = \"\" if end.endswith(\"+\"): end_rev = end[:-1]+\"-\" else: end_rev = end[:-1]+\"+\" new_links = [] if start in links_map: new_links.extend(list(links_map[start])) if start_rev in links_map: new_links.extend(list(links_map[start_rev])) if end in links_map: new_links.extend(list(links_map[end])) if end_rev in links_map: new_links.extend(list(links_map[end_rev])) for new_link in new_links: if new_link in 
segment_contigs: for contig in segment_contigs[new_link]: if i!=int(contig): g.add_edge(i,int(contig)) g.simplify(multiple=True, loops=False, combine_edges=None)"
},
{
"code": null,
"e": 7142,
"s": 6635,
"text": "out_fig_name = \"assembly_graph.png\"visual_style = {}# Set bbox and marginvisual_style[\"bbox\"] = (1500,1500)visual_style[\"margin\"] = 30# Set vertex coloursvisual_style[\"vertex_color\"] = 'white'# Set vertex sizevisual_style[\"vertex_size\"] = 35# Set vertex lable sizevisual_style[\"vertex_label_size\"] = 15# Don't curve the edgesvisual_style[\"edge_curved\"] = False# Set the layoutmy_layout = g.layout_fruchterman_reingold()visual_style[\"layout\"] = my_layout# Plot the graphplot(g, out_fig_name, **visual_style)"
},
{
"code": null,
"e": 7798,
"s": 7142,
"text": "node_colours = []for i in range(node_count): if i in efaecalis_list: node_colours.append(\"red\") elif i in saureus_list: node_colours.append(\"green\") else: node_colours.append(\"white\")out_fig_name = \"coloured_assembly_graph.png\"g.vs[\"color\"] = node_coloursvisual_style = {}# Set bbox and marginvisual_style[\"bbox\"] = (1500,1500)visual_style[\"margin\"] = 30# Set vertex sizevisual_style[\"vertex_size\"] = 35# Set vertex lable sizevisual_style[\"vertex_label_size\"] = 15# Don't curve the edgesvisual_style[\"edge_curved\"] = False# Set the layoutvisual_style[\"layout\"] = my_layout# Plot the graphplot(g, out_fig_name, **visual_style)"
},
{
"code": null,
"e": 7996,
"s": 7798,
"text": "My lab and I have developed a tool named GraphBin to refine binned contigs by making use of assembly graphs and the connectivity information between contigs. You can find the GitHub repo from here."
},
{
"code": null,
"e": 8007,
"s": 7996,
"text": "github.com"
},
{
"code": null,
"e": 8181,
"s": 8007,
"text": "GraphBin is published in the OUP Bioinformatics journal. You can have a look at the publication for more information about the tool from DOI: 10.1093/bioinformatics/btaa180."
},
{
"code": null,
"e": 8192,
"s": 8181,
"text": "dx.doi.org"
},
{
"code": null,
"e": 8291,
"s": 8192,
"text": "You can also read more on genome assembly and metagenomics from my previous articles listed below."
},
{
"code": null,
"e": 8314,
"s": 8291,
"text": "towardsdatascience.com"
},
{
"code": null,
"e": 8337,
"s": 8314,
"text": "towardsdatascience.com"
},
{
"code": null,
"e": 8413,
"s": 8337,
"text": "Hope you found my findings interesting. I would love to hear your thoughts."
}
] |
java.util.regex.Matcher.replaceAll() Method
|
The java.util.regex.Matcher.replaceAll(String replacement) method replaces every subsequence of the input sequence that matches the pattern with the given replacement string.
Following is the declaration for java.util.regex.Matcher.replaceAll(String replacement) method.
public String replaceAll(String replacement)
replacement — The replacement string.
replacement — The replacement string.
The string constructed by replacing each matching subsequence by the replacement string, substituting captured subsequences as needed.
The following example shows the usage of java.util.regex.Matcher.replaceAll(String replacement) method.
package com.tutorialspoint;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class MatcherDemo {
private static String REGEX = "dog";
private static String INPUT = "The dog says meow " + "All dogs say meow.";
private static String REPLACE = "cat";
public static void main(String[] args) {
Pattern pattern = Pattern.compile(REGEX);
// get a matcher object
Matcher matcher = pattern.matcher(INPUT);
INPUT = matcher.replaceAll(REPLACE);
System.out.println(INPUT);
}
}
Let us compile and run the above program, this will produce the following result —
The cat says meow All cats say meow.
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2299,
"s": 2124,
"text": "The java.util.regex.Matcher.replaceAll(String replacement) method replaces every subsequence of the input sequence that matches the pattern with the given replacement string."
},
{
"code": null,
"e": 2395,
"s": 2299,
"text": "Following is the declaration for java.util.regex.Matcher.replaceAll(String replacement) method."
},
{
"code": null,
"e": 2441,
"s": 2395,
"text": "public String replaceAll(String replacement)\n"
},
{
"code": null,
"e": 2479,
"s": 2441,
"text": "replacement — The replacement string."
},
{
"code": null,
"e": 2517,
"s": 2479,
"text": "replacement — The replacement string."
},
{
"code": null,
"e": 2652,
"s": 2517,
"text": "The string constructed by replacing each matching subsequence by the replacement string, substituting captured subsequences as needed."
},
{
"code": null,
"e": 2756,
"s": 2652,
"text": "The following example shows the usage of java.util.regex.Matcher.replaceAll(String replacement) method."
},
{
"code": null,
"e": 3302,
"s": 2756,
"text": "package com.tutorialspoint;\n\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\npublic class MatcherDemo {\n private static String REGEX = \"dog\";\n private static String INPUT = \"The dog says meow \" + \"All dogs say meow.\";\n private static String REPLACE = \"cat\";\n \n public static void main(String[] args) {\n Pattern pattern = Pattern.compile(REGEX);\n \n // get a matcher object\n Matcher matcher = pattern.matcher(INPUT); \n INPUT = matcher.replaceAll(REPLACE);\n System.out.println(INPUT);\n }\n}"
},
{
"code": null,
"e": 3385,
"s": 3302,
"text": "Let us compile and run the above program, this will produce the following result —"
},
{
"code": null,
"e": 3423,
"s": 3385,
"text": "The cat says meow All cats say meow.\n"
},
{
"code": null,
"e": 3430,
"s": 3423,
"text": " Print"
},
{
"code": null,
"e": 3441,
"s": 3430,
"text": " Add Notes"
}
] |
Python program to split the even and odd elements into two different lists.
|
In this program we create a user input list and the elements are mixture of odd and even elements. Our task is to split these list into two list. One contains odd number of element and another is even number of elements.
Input: [1, 2, 3, 4, 5, 9, 8, 6]
Output
Even lists: [2, 4, 8, 6]
Odd lists: [1, 3, 5, 9]
Step 1 : create a user input list.
Step 2 : take two empty list one for odd and another for even.
Step 3 : then traverse each element in the main list.
Step 4 : every element is divided by 2, if the remainder is 0 then it's an even number and added to the even list, otherwise it's an odd number and added to the odd list.
# Python code to split into even and odd lists
# Funtion to split
def splitevenodd(A):
evenlist = []
oddlist = []
for i in A:
if (i % 2 == 0):
evenlist.append(i)
else:
oddlist.append(i)
print("Even lists:", evenlist)
print("Odd lists:", oddlist)
# Driver Code
A=list()
n=int(input("Enter the size of the First List ::"))
print("Enter the Element of First List ::")
for i in range(int(n)):
k=int(input(""))
A.append(k)
splitevenodd(A)
Enter the size of the First List :: 8
Enter the Element of First List ::
1
2
3
4
5
9
8
6
Even lists: [2, 4, 8, 6]
Odd lists: [1, 3, 5, 9]
|
[
{
"code": null,
"e": 1283,
"s": 1062,
"text": "In this program we create a user input list and the elements are mixture of odd and even elements. Our task is to split these list into two list. One contains odd number of element and another is even number of elements."
},
{
"code": null,
"e": 1372,
"s": 1283,
"text": "Input: [1, 2, 3, 4, 5, 9, 8, 6]\nOutput\nEven lists: [2, 4, 8, 6]\nOdd lists: [1, 3, 5, 9]\n"
},
{
"code": null,
"e": 1681,
"s": 1372,
"text": "Step 1 : create a user input list.\nStep 2 : take two empty list one for odd and another for even.\nStep 3 : then traverse each element in the main list.\nStep 4 : every element is divided by 2, if remainder is 0 then itβs even number and add to the even list, otherwise its odd number and add to the odd list.\n"
},
{
"code": null,
"e": 2184,
"s": 1681,
"text": "# Python code to split into even and odd lists \n# Funtion to split \ndef splitevenodd(A): \n evenlist = [] \n oddlist = [] \n for i in A: \n if (i % 2 == 0): \n evenlist.append(i) \n else: \n oddlist.append(i) \n print(\"Even lists:\", evenlist) \n print(\"Odd lists:\", oddlist) \n \n# Driver Code \nA=list()\nn=int(input(\"Enter the size of the First List ::\"))\nprint(\"Enter the Element of First List ::\")\nfor i in range(int(n)):\n k=int(input(\"\"))\n A.append(k)\nsplitevenodd(A) "
},
{
"code": null,
"e": 2324,
"s": 2184,
"text": "Enter the size of the First List :: 8\nEnter the Element of First List ::\n1\n2\n3\n4\n5\n9\n8\n6\nEven lists: [2, 4, 8, 6]\nOdd lists: [1, 3, 5, 9]\n"
}
] |
MySQL - TIMEDIFF() Function
|
The DATE, DATETIME and TIMESTAMP datatypes in MySQL are used to store the date, date and time, time stamp values respectively. Where a time stamp is a numerical value representing the number of milliseconds from '1970-01-01 00:00:01' UTC (epoch) to the specified time. MySQL provides a set of functions to manipulate these values.
The MYSQL TIMEDIFF() function accepts two time or date-time values as parameters, calculates the difference between them (argument1-argument2) and returns the result. Both arguments of this function must be of the same type (either time or date-time).
Following is the syntax of the above function —
TIMEDIFF(expr1, expr2)
Following example demonstrates the usage of the TIMEDIFF() function —
mysql> SELECT TIMEDIFF('21:26;12', '10:40:25');
+----------------------------------+
| TIMEDIFF('21:26;12', '10:40:25') |
+----------------------------------+
| 10:45:35 |
+----------------------------------+
1 row in set, 1 warning (0.03 sec)
Following is another example of this function —
mysql> SELECT TIMEDIFF('02:45;55', '17:29:45');
+----------------------------------+
| TIMEDIFF('02:45;55', '17:29:45') |
+----------------------------------+
| -14:44:45 |
+----------------------------------+
1 row in set, 1 warning (0.00 sec)
In the following example we are passing DATETIME values as arguments to this function — MySQL TIMEDIFF() Function
mysql> SELECT TIMEDIFF('2018-05-23 20:40:32', '1996-12-07 14:06:11');
+--------------------------------------------------------+
| TIMEDIFF('2018-05-23 20:40:32', '1996-12-07 14:06:11') |
+--------------------------------------------------------+
| 838:59:59 |
+--------------------------------------------------------+
1 row in set, 1 warning (0.00 sec)
In the following example we are passing the result of CURDATE() as an argument to the TIMEDIFF() function —
mysql> SELECT TIMEDIFF('20:03:25', CURDATE());
+---------------------------------+
| TIMEDIFF('20:03:25', CURDATE()) |
+---------------------------------+
| -838:59:59 |
+---------------------------------+
1 row in set, 1 warning (0.00 sec)
We can also pass current timestamp values as arguments to this function —
mysql> SELECT TIMEDIFF(NOW(), '15:09:05 22:58:45');
+--------------------------------------+
| TIMEDIFF(NOW(), '15:09:05 22:58:45') |
+--------------------------------------+
| 838:59:59 |
+--------------------------------------+
1 row in set, 1 warning (0.00 sec)
mysql> SELECT TIMEDIFF(CURRENT_TIMESTAMP(), '2015:06:26 00:56:12');
+------------------------------------------------------+
| TIMEDIFF(CURRENT_TIMESTAMP(), '2015:06:26 00:56:12') |
+------------------------------------------------------+
| 838:59:59 |
+------------------------------------------------------+
1 row in set, 1 warning (0.00 sec)
Let us create another table with name Sales in MySQL database using CREATE statement as follows —
mysql> CREATE TABLE sales(
ID INT,
ProductName VARCHAR(255),
CustomerName VARCHAR(255),
DispatchDate date,
DispatchTime time,
Price INT,
Location VARCHAR(255)
);
Query OK, 0 rows affected (2.22 sec)
Now, we will insert 5 records in Sales table using INSERT statements —
insert into sales values (1, 'Key-Board', 'Raja', DATE('2019-09-01'), TIME('11:00:00'), 7000, 'Hyderabad');
insert into sales values (2, 'Earphones', 'Roja', DATE('2019-05-01'), TIME('11:00:00'), 2000, 'Vishakhapatnam');
insert into sales values (3, 'Mouse', 'Puja', DATE('2019-03-01'), TIME('10:59:59'), 3000, 'Vijayawada');
insert into sales values (4, 'Mobile', 'Vanaja', DATE('2019-03-01'), TIME('10:10:52'), 9000, 'Chennai');
insert into sales values (5, 'Headset', 'Jalaja', DATE('2019-04-06'), TIME('11:08:59'), 6000, 'Goa');
Following is another example of this function —
mysql> SELECT ProductName, CustomerName, DispatchDate, Price, DATEDIFF(CURRENT_DATE, DispatchDate) as difference_in_days, TIMEDIFF(CURTIME(), DispatchTime) as time_difference FROM sales;
+-------------+--------------+--------------+-------+--------------------+-----------------+
| ProductName | CustomerName | DispatchDate | Price | difference_in_days | time_difference |
+-------------+--------------+--------------+-------+--------------------+-----------------+
| Key-Board | Raja | 2019-09-01 | 7000 | 679 | 06:52:52 |
| Earphones | Roja | 2019-05-01 | 2000 | 802 | 06:52:52 |
| Mouse | Puja | 2019-03-01 | 3000 | 863 | 06:52:53 |
| Mobile | Vanaja | 2019-03-01 | 9000 | 863 | 07:42:00 |
| Headset | Jalaja | 2019-04-06 | 6000 | 827 | 06:43:53 |
+-------------+--------------+--------------+-------+--------------------+-----------------+
5 rows in set (0.00 sec)
Suppose we have created a table named SubscribersData with 5 records in it using the following queries —
mysql> CREATE TABLE SubscribersData(
SubscriberName VARCHAR(255),
PackageName VARCHAR(255),
SubscriptionDate date,
SubscriptionTime time
);
insert into SubscribersData values('Raja', 'Premium', Date('2020-10-21'), Time('20:53:49'));
insert into SubscribersData values('Roja', 'Basic', Date('2020-11-26'), Time('10:13:19'));
insert into SubscribersData values('Puja', 'Moderate', Date('2021-03-07'), Time('05:43:20'));
insert into SubscribersData values('Vanaja', 'Basic', Date('2021-02-21'), Time('16:36:39'));
insert into SubscribersData values('Jalaja', 'Premium', Date('2021-01-30'), Time('12:45:45'));
Following query calculates and displays the remaining number of days and time for the subscription to complete —
mysql> SELECT SubscriberName, PackageName, DATEDIFF(CURRENT_DATE, SubscriptionDate) as RemainingDays, TIMEDIFF(CURTIME(), SubscriptionTime) as RemainingTime FROM SubscribersData;
+----------------+-------------+---------------+---------------+
| SubscriberName | PackageName | RemainingDays | RemainingTime |
+----------------+-------------+---------------+---------------+
| Raja | Premium | 263 | -03:00:17 |
| Roja | Basic | 227 | 07:40:13 |
| Puja | Moderate | 126 | 12:10:12 |
| Vanaja | Basic | 140 | 01:16:53 |
| Jalaja | Premium | 162 | 05:07:47 |
+----------------+-------------+---------------+---------------+
5 rows in set (0.05 sec)
31 Lectures
6 hours
Eduonix Learning Solutions
84 Lectures
5.5 hours
Frahaan Hussain
6 Lectures
3.5 hours
DATAhill Solutions Srinivas Reddy
60 Lectures
10 hours
Vijay Kumar Parvatha Reddy
10 Lectures
1 hours
Harshit Srivastava
25 Lectures
4 hours
Trevoir Williams
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2664,
"s": 2333,
"text": "The DATE, DATETIME and TIMESTAMP datatypes in MySQL are used to store the date, date and time, time stamp values respectively. Where a time stamp is a numerical value representing the number of milliseconds from '1970-01-01 00:00:01' UTC (epoch) to the specified time. MySQL provides a set of functions to manipulate these values."
},
{
"code": null,
"e": 2914,
"s": 2664,
"text": "The MYSQL TIMEDIFF() function accepts two time or, date-time values as parameters, calculates the difference between them (argument1-argument2) and returns the result. Both arguments of this function must be of same type (either time or date-time) ."
},
{
"code": null,
"e": 2962,
"s": 2914,
"text": "Following is the syntax of the above function β"
},
{
"code": null,
"e": 2986,
"s": 2962,
"text": "TIMEDIFF(expr1, expr2)\n"
},
{
"code": null,
"e": 3056,
"s": 2986,
"text": "Following example demonstrates the usage of the TIMEDIFF() function β"
},
{
"code": null,
"e": 3324,
"s": 3056,
"text": "mysql> SELECT TIMEDIFF('21:26;12', '10:40:25');\n+----------------------------------+\n| TIMEDIFF('21:26;12', '10:40:25') |\n+----------------------------------+\n| 10:45:35 |\n+----------------------------------+\n1 row in set, 1 warning (0.03 sec)"
},
{
"code": null,
"e": 3372,
"s": 3324,
"text": "Following is another example of this function β"
},
{
"code": null,
"e": 3640,
"s": 3372,
"text": "mysql> SELECT TIMEDIFF('02:45;55', '17:29:45');\n+----------------------------------+\n| TIMEDIFF('02:45;55', '17:29:45') |\n+----------------------------------+\n| -14:44:45 |\n+----------------------------------+\n1 row in set, 1 warning (0.00 sec)"
},
{
"code": null,
"e": 3755,
"s": 3640,
"text": "In the following example we are passing DATETIME values as an argument to this functionβ MySQL TIMEDIFF() Function"
},
{
"code": null,
"e": 4155,
"s": 3755,
"text": "mysql> SELECT TIMEDIFF('2018-05-23 20:40:32', '1996-12-07 14:06:11');\n+--------------------------------------------------------+\n| TIMEDIFF('2018-05-23 20:40:32', '1996-12-07 14:06:11') |\n+--------------------------------------------------------+\n| 838:59:59 |\n+--------------------------------------------------------+\n1 row in set, 1 warning (0.00 sec)"
},
{
"code": null,
"e": 4263,
"s": 4155,
"text": "In the following example we are passing the result of CURTIME() as an argument to the TIMEDIFF() function β"
},
{
"code": null,
"e": 4525,
"s": 4263,
"text": "mysql> SELECT TIMEDIFF('20:03:25', CURDATE());\n+---------------------------------+\n| TIMEDIFF('20:03:25', CURDATE()) |\n+---------------------------------+\n| -838:59:59 |\n+---------------------------------+\n1 row in set, 1 warning (0.00 sec)"
},
{
"code": null,
"e": 4599,
"s": 4525,
"text": "We can also pass current timestamp values as arguments to this function β"
},
{
"code": null,
"e": 5280,
"s": 4599,
"text": "mysql> SELECT TIMEDIFF(NOW(), '15:09:05 22:58:45');\n+--------------------------------------+\n| TIMEDIFF(NOW(), '15:09:05 22:58:45') |\n+--------------------------------------+\n| 838:59:59 |\n+--------------------------------------+\n1 row in set, 1 warning (0.00 sec)\n\nmysql> SELECT TIMEDIFF(CURRENT_TIMESTAMP(), '2015:06:26 00:56:12');\n+------------------------------------------------------+\n| TIMEDIFF(CURRENT_TIMESTAMP(), '2015:06:26 00:56:12') |\n+------------------------------------------------------+\n| 838:59:59 |\n+------------------------------------------------------+\n1 row in set, 1 warning (0.00 sec)"
},
{
"code": null,
"e": 5378,
"s": 5280,
"text": "Let us create another table with name Sales in MySQL database using CREATE statement as follows β"
},
{
"code": null,
"e": 5584,
"s": 5378,
"text": "mysql> CREATE TABLE sales(\n\tID INT,\n\tProductName VARCHAR(255),\n\tCustomerName VARCHAR(255),\n\tDispatchDate date,\n\tDispatchTime time,\n\tPrice INT,\n\tLocation VARCHAR(255)\n);\nQuery OK, 0 rows affected (2.22 sec)"
},
{
"code": null,
"e": 5655,
"s": 5584,
"text": "Now, we will insert 5 records in Sales table using INSERT statements β"
},
{
"code": null,
"e": 6188,
"s": 5655,
"text": "insert into sales values (1, 'Key-Board', 'Raja', DATE('2019-09-01'), TIME('11:00:00'), 7000, 'Hyderabad');\ninsert into sales values (2, 'Earphones', 'Roja', DATE('2019-05-01'), TIME('11:00:00'), 2000, 'Vishakhapatnam');\ninsert into sales values (3, 'Mouse', 'Puja', DATE('2019-03-01'), TIME('10:59:59'), 3000, 'Vijayawada');\ninsert into sales values (4, 'Mobile', 'Vanaja', DATE('2019-03-01'), TIME('10:10:52'), 9000, 'Chennai');\ninsert into sales values (5, 'Headset', 'Jalaja', DATE('2019-04-06'), TIME('11:08:59'), 6000, 'Goa');"
},
{
"code": null,
"e": 6236,
"s": 6188,
"text": "Following is another example of this function β"
},
{
"code": null,
"e": 7285,
"s": 6236,
"text": "mysql> SELECT ProductName, CustomerName, DispatchDate, Price, DATEDIFF(CURRENT_DATE, DispatchDate) as difference_in_days, TIMEDIFF(CURTIME(), DispatchTime) as time_difference FROM sales;\n+-------------+--------------+--------------+-------+--------------------+-----------------+\n| ProductName | CustomerName | DispatchDate | Price | difference_in_days | time_difference |\n+-------------+--------------+--------------+-------+--------------------+-----------------+\n| Key-Board | Raja | 2019-09-01 | 7000 | 679 | 06:52:52 |\n| Earphones | Roja | 2019-05-01 | 2000 | 802 | 06:52:52 |\n| Mouse | Puja | 2019-03-01 | 3000 | 863 | 06:52:53 |\n| Mobile | Vanaja | 2019-03-01 | 9000 | 863 | 07:42:00 |\n| Headset | Jalaja | 2019-04-06 | 6000 | 827 | 06:43:53 |\n+-------------+--------------+--------------+-------+--------------------+-----------------+\n5 rows in set (0.00 sec)"
},
{
"code": null,
"e": 7390,
"s": 7285,
"text": "Suppose we have created a table named SubscribersData with 5 records in it using the following queries β"
},
{
"code": null,
"e": 8000,
"s": 7390,
"text": "mysql> CREATE TABLE SubscribersData(\n\tSubscriberName VARCHAR(255),\n\tPackageName VARCHAR(255),\n\tSubscriptionDate date,\n\tSubscriptionTime time\n);\ninsert into SubscribersData values('Raja', 'Premium', Date('2020-10-21'), Time('20:53:49'));\ninsert into SubscribersData values('Roja', 'Basic', Date('2020-11-26'), Time('10:13:19'));\ninsert into SubscribersData values('Puja', 'Moderate', Date('2021-03-07'), Time('05:43:20'));\ninsert into SubscribersData values('Vanaja', 'Basic', Date('2021-02-21'), Time('16:36:39'));\ninsert into SubscribersData values('Jalaja', 'Premium', Date('2021-01-30'), Time('12:45:45'));"
},
{
"code": null,
"e": 8113,
"s": 8000,
"text": "Following query calculates and displays the remaining number of days and time for the subscription to complete β"
},
{
"code": null,
"e": 8903,
"s": 8113,
"text": "mysql> SELECT SubscriberName, PackageName, DATEDIFF(CURRENT_DATE, SubscriptionDate) as RemainingDays, TIMEDIFF(CURTIME(), SubscriptionTime) as RemainingTime FROM SubscribersData;\n+----------------+-------------+---------------+---------------+\n| SubscriberName | PackageName | RemainingDays | RemainingTime |\n+----------------+-------------+---------------+---------------+\n| Raja | Premium | 263 | -03:00:17 |\n| Roja | Basic | 227 | 07:40:13 | \n| Puja | Moderate | 126 | 12:10:12 |\n| Vanaja | Basic | 140 | 01:16:53 |\n| Jalaja | Premium | 162 | 05:07:47 |\n+----------------+-------------+---------------+---------------+\n5 rows in set (0.05 sec)"
},
{
"code": null,
"e": 8936,
"s": 8903,
"text": "\n 31 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 8964,
"s": 8936,
"text": " Eduonix Learning Solutions"
},
{
"code": null,
"e": 8999,
"s": 8964,
"text": "\n 84 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 9016,
"s": 8999,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 9050,
"s": 9016,
"text": "\n 6 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 9085,
"s": 9050,
"text": " DATAhill Solutions Srinivas Reddy"
},
{
"code": null,
"e": 9119,
"s": 9085,
"text": "\n 60 Lectures \n 10 hours \n"
},
{
"code": null,
"e": 9147,
"s": 9119,
"text": " Vijay Kumar Parvatha Reddy"
},
{
"code": null,
"e": 9180,
"s": 9147,
"text": "\n 10 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 9200,
"s": 9180,
"text": " Harshit Srivastava"
},
{
"code": null,
"e": 9233,
"s": 9200,
"text": "\n 25 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 9251,
"s": 9233,
"text": " Trevoir Williams"
},
{
"code": null,
"e": 9258,
"s": 9251,
"text": " Print"
},
{
"code": null,
"e": 9269,
"s": 9258,
"text": " Add Notes"
}
] |
Anonymous function in Dart Programming
|
A function without a name is known as an anonymous function. They behave in the exact same manner as a normal named function would. The only difference between the named and an anonymous function is how different they are in syntax.
Anonymous functions are used in Dart to form closures. An anonymous function contains a self-contained block of codes, also it can be passed as a parameter to another function as well.
(parameterList){
// inner statement(s)
}
Now, let's consider a simple example of an anonymous function.
Consider the example shown below β
Live Demo
void main() {
var fruits = ["Apple", "Mango", "Banana", "Kiwi"];
fruits.forEach((item) {
print('${fruits.indexOf(item)}: $item');
});
}
In the above example, we have an anonymous function with an untyped parameter named item.
0: Apple
1: Mango
2: Banana
3: Kiwi
|
[
{
"code": null,
"e": 1295,
"s": 1062,
"text": "A function without a name is known as an anonymous function. They behave in the exact same manner as a normal named function would. The only difference between the named and an anonymous function is how different they are in syntax."
},
{
"code": null,
"e": 1480,
"s": 1295,
"text": "Anonymous functions are used in Dart to form closures. An anonymous function contains a self-contained block of codes, also it can be passed as a parameter to another function as well."
},
{
"code": null,
"e": 1524,
"s": 1480,
"text": "(parameterList){\n // inner statement(s)\n}"
},
{
"code": null,
"e": 1587,
"s": 1524,
"text": "Now, let's consider a simple example of an anonymous function."
},
{
"code": null,
"e": 1622,
"s": 1587,
"text": "Consider the example shown below β"
},
{
"code": null,
"e": 1633,
"s": 1622,
"text": " Live Demo"
},
{
"code": null,
"e": 1784,
"s": 1633,
"text": "void main() {\n var fruits = [\"Apple\", \"Mango\", \"Banana\", \"Kiwi\"];\n fruits.forEach((item) {\n print('${fruits.indexOf(item)}: $item');\n });\n}"
},
{
"code": null,
"e": 1874,
"s": 1784,
"text": "In the above example, we have an anonymous function with an untyped parameter named item."
},
{
"code": null,
"e": 1910,
"s": 1874,
"text": "0: Apple\n1: Mango\n2: Banana\n3: Kiwi"
}
] |
TypeORM with Express
|
Express is one of the most popular JavaScript frameworks for creating web applications. Let us learn how to use TypeORM along with the Express framework in this chapter.
TypeORM CLI provides an easy option to create a complete working Express web application (RESTful API application) integrated with TypeORM. The CLI command to create the application is as follows —
cd /path/to/workspace typeorm init --express --name typeorm-express-sample --database mysql
Above command will create a new web application under typeorm-express-sample folder. The structure of the application is as follows β
β .gitignore
β ormconfig.json
β package.json
β README.md
β tsconfig.json
β ββββsrc
β index.ts
β routes.ts
β
ββββcontroller
β UserController.ts
β
ββββentity
β User.ts
β
ββββmigration
Here,
As we know, ormconfig.json is the TypeORM configuration file. The code is as follows,
{
"type": "mysql",
"host": "localhost",
"port": 3306,
"username": "test",
"password": "test",
"database": "test",
"synchronize": true,
"logging": false,
"entities": [
"src/entity/**/*.ts"
],
"migrations": [ "src/migration/**/*.ts"
],
"subscribers": [ "src/subscriber/**/*.ts"
],
"cli": {
"entitiesDir": "src/entity", "migrationsDir": "src/migration", "subscribersDir": "src/subscriber"
}
}
Here, change the database setting to match your local database setting.
package.json file is the main configuration of the application.
tsconfig.json file contains the configuration related to TypeScript.
entity folder contains the TypeORM models. A default User model will be created by CLI and it is as follows β
import {Entity, PrimaryGeneratedColumn, Column} from "typeorm";
@Entity()
export class User {
@PrimaryGeneratedColumn()
id: number;
@Column()
firstName: string;
@Column()
lastName: string;
@Column()
age: number;
}
controller folder contains the Express controllers. The CLI creates a default user API controller with methods to add / list / delete user details. The code is as follows —
import {getRepository} from "typeorm"; import {NextFunction, Request, Response} from "express"; import {User} from "../entity/User";
export class UserController {
private userRepository = getRepository(User);
async all(request: Request, response: Response, next: NextFunction) {
return this.userRepository.find();
}
async one(request: Request, response: Response, next: NextFunction) {
return this.userRepository.findOne(request.params.id);
}
async save(request: Request, response: Response, next: NextFunction) {
return this.userRepository.save(request.body);
}
async remove(request: Request, response: Response, next: NextFunction) {
let userToRemove = await this.userRepository.findOne(request.params.id);
await this.userRepository.remove(userToRemove);
}
}
Here,
all method is used to fetch all users from the database.
one method is used to fetch a single user from the database using user id
save method is used to save the user information into the database.
delete method is used to delete the user from the database using user id
routes.ts file maps the user controller methods to proper URL and the code is as follows β
import {UserController} from "./controller/UserController";
export const Routes = [{
method: "get",
route: "/users",
controller: UserController, action: "all"
}, {
method: "get",
route: "/users/:id", controller: UserController, action: "one"
}, {
method: "post",
route: "/users",
controller: UserController, action: "save"
}, {
method: "delete", route: "/users/:id", controller: UserController,
action: "remove"
}];
Here,
/users url is mapped to user controller. Each verb post, get and delete are mapped to different methods.
Finally, index.ts is our main web application entry point. The source code is as follows β
import "reflect-metadata";
import {createConnection} from "typeorm";
import * as express from "express"; import * as bodyParser from "body-parser";
import {Request, Response} from "express";
import {Routes} from "./routes"; import {User} from "./entity/User";
createConnection().then(async connection => {
// create express app const app = express(); app.use(bodyParser.json());
// register express routes from defined application routes Routes.forEach(route => {
(app as any)[route.method](route.route, (req: Request, res: Response, next: Function) => {
const result = (new (route.controller as any))[route.action](req, res, next);
if (result instanceof Promise) {
result.then(result => result !== null && result !== undefined ? res.send(result) : undefined);
} else if (result !== null && result !== undefined) {
            res.json(result);
}
});
});
// setup express app here
// ...
// start express server app.listen(3000);
// insert new users for test await connection.manager.save(connection.manager.create(User, {
firstName: "Timber",
lastName: "Saw",
age: 27
}));
await connection.manager.save(connection.manager.create(User, {
firstName: "Phantom",
lastName: "Assassin",
age: 24
}));
console.log("Express server has started on port 3000. Open http://localhost:3000/users to see results");
}).catch(error => console.log(error));
Here, the application configures the routes, inserts two users, and then starts the web application at port 3000. We can access the application at http://localhost:3000.
To run the application, follow below steps β
Let us install the necessary packages using below command β
npm install
npm notice created a lockfile as package-lock.json. You should commit this file.
npm WARN typeorm-express-sample@0.0.1 No repository field.
npm WARN typeorm-express-sample@0.0.1 No license field.
added 176 packages from 472 contributors and audited 351 packages in 11.965s
3 packages are looking for funding run `npm fund` for details
found 0 vulnerabilities
Run the below command to start the application.
npm start
> typeorm-express-sample@0.0.1 start /path/to/workspace/typeorm-express-sample
> ts-node src/index.ts
Express server has started on port 3000. Open http://localhost:3000/users to see results
Let us access our web application API using curl command as below β
curl http://localhost:3000/users
Here,
curl is a command line application to access web application from command prompt. It supports all the HTTP verbs such as get, post, delete, etc.,
[{"id":1,"firstName":"Timber","lastName":"Saw","age":27},{"id":2,"firstName":"Phantom","lastName":"Assassin","age":24}]
To fetch the first record, we can use below command β
curl http://localhost:3000/users/1
{"id":1,"firstName":"Timber","lastName":"Saw","age":27}
To delete a user record, we can use below command β
curl -X DELETE http://localhost:3000/users/1
As we have seen in this chapter, TypeORM can be easily integrated into an Express application.
19 Lectures
50 mins
James Coonce
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2207,
"s": 2051,
"text": "Express is one of the popular JavaScript framework to create web application. Let us learn how to use TypeORM along with express framework in this chapter."
},
{
"code": null,
"e": 2405,
"s": 2207,
"text": "TypeORM CLI provides an easy option to create a complete working express web application (Restful API application) integrated with TypeORM. The CLI command to create the application is as follows β"
},
{
"code": null,
"e": 2498,
"s": 2405,
"text": "cd /path/to/workspace typeorm init --express --name typeorm-express-sample --database mysql\n"
},
{
"code": null,
"e": 2632,
"s": 2498,
"text": "Above command will create a new web application under typeorm-express-sample folder. The structure of the application is as follows β"
},
{
"code": null,
"e": 2899,
"s": 2632,
"text": "β .gitignore \nβ ormconfig.json \nβ package.json \nβ README.md \nβ tsconfig.json \nβ ββββsrc \n β index.ts \n β routes.ts \n β \n ββββcontroller \n β UserController.ts \n β \n ββββentity \n β User.ts \n β \n ββββmigration"
},
{
"code": null,
"e": 2905,
"s": 2899,
"text": "Here,"
},
{
"code": null,
"e": 2991,
"s": 2905,
"text": "As we know, ormconfig.json is the TypeORM configuration file. The code is as follows,"
},
{
"code": null,
"e": 3459,
"s": 2991,
"text": "{ \n \"type\": \"mysql\", \n \"host\": \"localhost\", \n \"port\": 3306, \n \"username\": \"test\", \n \"password\": \"test\", \n \"database\": \"test\", \n \"synchronize\": true, \n \"logging\": false, \n \"entities\": [\n \"src/entity/**/*.ts\" \n ], \n \"migrations\": [ \"src/migration/**/*.ts\" \n ], \n \"subscribers\": [ \"src/subscriber/**/*.ts\" \n ], \n \"cli\": { \n \"entitiesDir\": \"src/entity\", \"migrationsDir\": \"src/migration\", \"subscribersDir\": \"src/subscriber\" \n } \n}"
},
{
"code": null,
"e": 3531,
"s": 3459,
"text": "Here, change the database setting to match your local database setting."
},
{
"code": null,
"e": 3595,
"s": 3531,
"text": "package.json file is the main configuration of the application."
},
{
"code": null,
"e": 3664,
"s": 3595,
"text": "tsconfig.json file contains the configuration related to TypeScript."
},
{
"code": null,
"e": 3774,
"s": 3664,
"text": "entity folder contains the TypeORM models. A default User model will be created by CLI and it is as follows β"
},
{
"code": null,
"e": 4040,
"s": 3774,
"text": "import {Entity, PrimaryGeneratedColumn, Column} from \"typeorm\"; \n\n@Entity() \nexport class User { \n \n @PrimaryGeneratedColumn() \n id: number; \n \n @Column() \n firstName: string; \n \n @Column() \n lastName: string; \n \n @Column() \n age: number; \n}"
},
{
"code": null,
"e": 4197,
"s": 4040,
"text": "controller folder contains the express controllers. CLI create a default user API controller with add / list / delete user details. The code is as follows β"
},
{
"code": null,
"e": 5048,
"s": 4197,
"text": "import {getRepository} from \"typeorm\"; import {NextFunction, Request, Response} from \"express\"; import {User} from \"../entity/User\"; \n\nexport class UserController {\n\n private userRepository = getRepository(User); \n \n async all(request: Request, response: Response, next: NextFunction) { \n return this.userRepository.find(); \n } \n \n async one(request: Request, response: Response, next: NextFunction) { \n return this.userRepository.findOne(request.params.id); \n } \n \n async save(request: Request, response: Response, next: NextFunction) { \n return this.userRepository.save(request.body); \n } \n \n async remove(request: Request, response: Response, next: NextFunction) { \n let userToRemove = await this.userRepository.findOne(request.params.id); \n await this.userRepository.remove(userToRemove); \n } \n}"
},
{
"code": null,
"e": 5054,
"s": 5048,
"text": "Here,"
},
{
"code": null,
"e": 5111,
"s": 5054,
"text": "all method is used to fetch all users from the database."
},
{
"code": null,
"e": 5185,
"s": 5111,
"text": "one method is used to fetch a single user from the database using user id"
},
{
"code": null,
"e": 5253,
"s": 5185,
"text": "save method is used to save the user information into the database."
},
{
"code": null,
"e": 5326,
"s": 5253,
"text": "delete method is used to delete the user from the database using user id"
},
{
"code": null,
"e": 5417,
"s": 5326,
"text": "routes.ts file maps the user controller methods to proper URL and the code is as follows β"
},
{
"code": null,
"e": 5917,
"s": 5417,
"text": "import {UserController} from \"./controller/UserController\"; \n\nexport const Routes = [{ \n method: \"get\", \n route: \"/users\", \n controller: UserController, action: \"all\" \n }, { \n method: \"get\", \n route: \"/users/:id\", controller: UserController, action: \"one\" \n }, { \n method: \"post\", \n route: \"/users\", \n controller: UserController, action: \"save\" \n }, { \n method: \"delete\", route: \"/users/:id\", controller: UserController,\n action: \"remove\" \n}];"
},
{
"code": null,
"e": 5923,
"s": 5917,
"text": "Here,"
},
{
"code": null,
"e": 6028,
"s": 5923,
"text": "/users url is mapped to user controller. Each verb post, get and delete are mapped to different methods."
},
{
"code": null,
"e": 6119,
"s": 6028,
"text": "Finally, index.ts is our main web application entry point. The source code is as follows β"
},
{
"code": null,
"e": 7646,
"s": 6119,
"text": "import \"reflect-metadata\"; \nimport {createConnection} from \"typeorm\"; \nimport * as express from \"express\"; import * as bodyParser from \"body-parser\"; \nimport {Request, Response} from \"express\"; \nimport {Routes} from \"./routes\"; import {User} from \"./entity/User\"; \n\ncreateConnection().then(async connection => { \n\n // create express app const app = express(); app.use(bodyParser.json()); \n\n // register express routes from defined application routes Routes.forEach(route => { \n (app as any)[route.method](route.route, (req: Request, res: Response, next: Function) => { \n const result = (new (route.controller as any))[route.action](req, res, next); \n if (result instanceof Promise) { \n result.then(result => result !== null && result !== undefined ? res.send(result) : undefined); \n } else if (result !== null && result !== undefined) { \n .json(result); \n } \n }); \n }); \n \n // setup express app here \n // ... \n \n // start express server app.listen(3000); \n \n // insert new users for test await connection.manager.save(connection.manager.create(User, { \n firstName: \"Timber\",\n lastName: \"Saw\", \n age: 27 \n }));\n await connection.manager.save(connection.manager.create(User, { \n firstName: \"Phantom\", \n lastName: \"Assassin\", \n age: 24 \n })); \n \n console.log(\"Express server has started on port 3000. Open http://localhost:3000/users to see results\"); \n}).catch(error => console.log(error));"
},
{
"code": null,
"e": 7812,
"s": 7646,
"text": "Here, the application configures the routes, insert two users and then start the web application at port 3000. We can access the application at http://localhost:3000"
},
{
"code": null,
"e": 7857,
"s": 7812,
"text": "To run the application, follow below steps β"
},
{
"code": null,
"e": 7917,
"s": 7857,
"text": "Let us install the necessary packages using below command β"
},
{
"code": null,
"e": 7930,
"s": 7917,
"text": "npm install\n"
},
{
"code": null,
"e": 8298,
"s": 7930,
"text": "npm notice created a lockfile as package-lock.json. You should commit this file. \nnpm WARN typeorm-express-sample@0.0.1 No repository field. \nnpm WARN typeorm-express-sample@0.0.1 No license field. \n\nadded 176 packages from 472 contributors and audited 351 packages in 11.965s \n\n3 packages are looking for funding run `npm fund` for details \n\nfound 0 vulnerabilities"
},
{
"code": null,
"e": 8346,
"s": 8298,
"text": "Run the below command to start the application."
},
{
"code": null,
"e": 8357,
"s": 8346,
"text": "npm start\n"
},
{
"code": null,
"e": 8551,
"s": 8357,
"text": "> typeorm-express-sample@0.0.1 start /path/to/workspace/typeorm-express-sample \n> ts-node src/index.ts \n\nExpress server has started on port 3000. Open http://localhost:3000/users to see results"
},
{
"code": null,
"e": 8619,
"s": 8551,
"text": "Let us access our web application API using curl command as below β"
},
{
"code": null,
"e": 8653,
"s": 8619,
"text": "curl http://localhost:3000/users\n"
},
{
"code": null,
"e": 8659,
"s": 8653,
"text": "Here,"
},
{
"code": null,
"e": 8805,
"s": 8659,
"text": "curl is a command line application to access web application from command prompt. It supports all the HTTP verbs such as get, post, delete, etc.,"
},
{
"code": null,
"e": 8926,
"s": 8805,
"text": "[{\"id\":1,\"firstName\":\"Timber\",\"lastName\":\"Saw\",\"age\":27},{\"id\":2,\"firstName\":\"Phantom\",\"lastName\":\"Assassin\",\"age\":24}]\n"
},
{
"code": null,
"e": 8980,
"s": 8926,
"text": "To fetch the first record, we can use below command β"
},
{
"code": null,
"e": 9016,
"s": 8980,
"text": "curl http://localhost:3000/users/1\n"
},
{
"code": null,
"e": 9073,
"s": 9016,
"text": "{\"id\":1,\"firstName\":\"Timber\",\"lastName\":\"Saw\",\"age\":27}\n"
},
{
"code": null,
"e": 9125,
"s": 9073,
"text": "To delete a user record, we can use below command β"
},
{
"code": null,
"e": 9171,
"s": 9125,
"text": "curl -X DELETE http://localhost:3000/users/1\n"
},
{
"code": null,
"e": 9258,
"s": 9171,
"text": "As we seen in this chapter, TypeORM can be easily integrated into express application."
},
{
"code": null,
"e": 9290,
"s": 9258,
"text": "\n 19 Lectures \n 50 mins\n"
},
{
"code": null,
"e": 9304,
"s": 9290,
"text": " James Coonce"
},
{
"code": null,
"e": 9311,
"s": 9304,
"text": " Print"
},
{
"code": null,
"e": 9322,
"s": 9311,
"text": " Add Notes"
}
] |
DateFormat parse(string , ParsePosition) Method in Java with Examples - GeeksforGeeks
|
24 Jan, 2022
DateFormat class of java.text package is an abstract class that is used to format and parse dates for any locale. It allows us to format date to text and parse text to date. DateFormat class provides many functionalities to obtain, format, parse default date/time.
Note: DateFormat class extends Format class that means it is a subclass of Format class. Since DateFormat class is an abstract class, therefore, it can be used for date/time formatting subclasses, which format and parses dates or times in a language-independent manner.
Package-view:
java.text Package
DateFormat Class
parse(string , ParsePosition) Method
The parse(String the_text, ParsePosition position) method of DateFormat class is used to parse the text from a string to produce the Date. The method parses the text starting at the index given by a start position.
Syntax:
public abstract Date parse(String the_text, ParsePosition position)
Parameters: It takes 2 parameters:
the_text: This is of the String type and refers to the string which is to be parsed to produce the date.
position: This is of ParsePosition object type and refers to the information of the starting index of the parse.
Return Type: Returns the Date parsed from the string or Null in case of an error.
Example 1:
Java
// Java Program to Illustrate parse() Method// of DateFormat Class // Importing required classesimport java.text.*;import java.util.Calendar; // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Creating object of DateFormat class inside main() DateFormat DFormat = new SimpleDateFormat("MM/ dd/ yy"); // Try block to check for exceptions try { Calendar cal = Calendar.getInstance(); // Parsing date From string // using parse() method of DateFormat class // Custom string date String dt = "10/ 27/ 16"; // Printing the above unparsed date System.out.println("The unparsed" + " string is: " + dt); // Parsing date using parse() method cal.setTime(DFormat.parse(dt)); // Printing the parsed time System.out.println("Time parsed: " + cal.getTime()); } // Catch block to handle exceptions catch (ParseException except) { // Display exceptions with line number // using printStackTrace() method except.printStackTrace(); } }}
The unparsed string is: 10/ 27/ 16
Time parsed: Thu Oct 27 00:00:00 UTC 2016
Example 2:
Java
// Java Program to Illustrate parse() Method// of DateFormat Class // Importing required classesimport java.text.*;import java.util.Calendar; // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Creating an object of DateFormat class DateFormat DFormat = new SimpleDateFormat("MM/ dd/ yy"); // Try bloc kto check for exceptions try { // Getting instance from calendar Calendar cal = Calendar.getInstance(); // Parsing date from string // using parse() method String dt = "01/ 29/ 19"; // Displaying the unparsed date System.out.println("The unparsed" + " string is: " + dt); // Parsing date cal.setTime(DFormat.parse(dt)); System.out.println("Time parsed: " + cal.getTime()); } // Catch block to handle the exceptions catch (ParseException except) { // Display exception with line number // using printStackTrace() method except.printStackTrace(); } }}
The unparsed string is: 01/ 29/ 19
Time parsed: Tue Jan 29 00:00:00 UTC 2019
kalrap615
solankimayank
Java-DateFormat
Java-Functions
Java-text package
Java
Java
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Initialize an ArrayList in Java
Interfaces in Java
ArrayList in Java
Multidimensional Arrays in Java
Stack Class in Java
Stream In Java
Singleton Class in Java
Set in Java
Overriding in Java
LinkedList in Java
|
[
{
"code": null,
"e": 24574,
"s": 24546,
"text": "\n24 Jan, 2022"
},
{
"code": null,
"e": 24839,
"s": 24574,
"text": "DateFormat class of java.text package is an abstract class that is used to format and parse dates for any locale. It allows us to format date to text and parse text to date. DateFormat class provides many functionalities to obtain, format, parse default date/time."
},
{
"code": null,
"e": 25111,
"s": 24839,
"text": "Note: DateFormat class extends Format class that means it is a subclass of Format class. Since DateFormat class is an abstract class, therefore, it can be used for date/time formatting subclasses, which format and parses dates or times in a language-independent manner. "
},
{
"code": null,
"e": 25125,
"s": 25111,
"text": "Package-view:"
},
{
"code": null,
"e": 25209,
"s": 25125,
"text": "java.text Package\n DateFormat Class\n parse(string , ParsePosition) Method"
},
{
"code": null,
"e": 25424,
"s": 25209,
"text": "The parse(String the_text, ParsePosition position) method of DateFormat class is used to parse the text from a string to produce the Date. The method parses the text starting at the index given by a start position."
},
{
"code": null,
"e": 25433,
"s": 25424,
"text": "Syntax: "
},
{
"code": null,
"e": 25501,
"s": 25433,
"text": "public abstract Date parse(String the_text, ParsePosition position)"
},
{
"code": null,
"e": 25537,
"s": 25501,
"text": "Parameters: It takes 2 parameters: "
},
{
"code": null,
"e": 25642,
"s": 25537,
"text": "the_text: This is of the String type and refers to the string which is to be parsed to produce the date."
},
{
"code": null,
"e": 25755,
"s": 25642,
"text": "position: This is of ParsePosition object type and refers to the information of the starting index of the parse."
},
{
"code": null,
"e": 25837,
"s": 25755,
"text": "Return Type: Returns the Date parsed from the string or Null in case of an error."
},
{
"code": null,
"e": 25848,
"s": 25837,
"text": "Example 1:"
},
{
"code": null,
"e": 25853,
"s": 25848,
"text": "Java"
},
{
"code": "// Java Program to Illustrate parse() Method// of DateFormat Class // Importing required classesimport java.text.*;import java.util.Calendar; // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Creating object of DateFormat class inside main() DateFormat DFormat = new SimpleDateFormat(\"MM/ dd/ yy\"); // Try block to check for exceptions try { Calendar cal = Calendar.getInstance(); // Parsing date From string // using parse() method of DateFormat class // Custom string date String dt = \"10/ 27/ 16\"; // Printing the above unparsed date System.out.println(\"The unparsed\" + \" string is: \" + dt); // Parsing date using parse() method cal.setTime(DFormat.parse(dt)); // Printing the parsed time System.out.println(\"Time parsed: \" + cal.getTime()); } // Catch block to handle exceptions catch (ParseException except) { // Display exceptions with line number // using printStackTrace() method except.printStackTrace(); } }}",
"e": 27123,
"s": 25853,
"text": null
},
{
"code": null,
"e": 27200,
"s": 27123,
"text": "The unparsed string is: 10/ 27/ 16\nTime parsed: Thu Oct 27 00:00:00 UTC 2016"
},
{
"code": null,
"e": 27213,
"s": 27202,
"text": "Example 2:"
},
{
"code": null,
"e": 27218,
"s": 27213,
"text": "Java"
},
{
"code": "// Java Program to Illustrate parse() Method// of DateFormat Class // Importing required classesimport java.text.*;import java.util.Calendar; // Main classpublic class GFG { // Main driver method public static void main(String[] args) { // Creating an object of DateFormat class DateFormat DFormat = new SimpleDateFormat(\"MM/ dd/ yy\"); // Try bloc kto check for exceptions try { // Getting instance from calendar Calendar cal = Calendar.getInstance(); // Parsing date from string // using parse() method String dt = \"01/ 29/ 19\"; // Displaying the unparsed date System.out.println(\"The unparsed\" + \" string is: \" + dt); // Parsing date cal.setTime(DFormat.parse(dt)); System.out.println(\"Time parsed: \" + cal.getTime()); } // Catch block to handle the exceptions catch (ParseException except) { // Display exception with line number // using printStackTrace() method except.printStackTrace(); } }}",
"e": 28407,
"s": 27218,
"text": null
},
{
"code": null,
"e": 28484,
"s": 28407,
"text": "The unparsed string is: 01/ 29/ 19\nTime parsed: Tue Jan 29 00:00:00 UTC 2019"
},
{
"code": null,
"e": 28494,
"s": 28484,
"text": "kalrap615"
},
{
"code": null,
"e": 28508,
"s": 28494,
"text": "solankimayank"
},
{
"code": null,
"e": 28524,
"s": 28508,
"text": "Java-DateFormat"
},
{
"code": null,
"e": 28539,
"s": 28524,
"text": "Java-Functions"
},
{
"code": null,
"e": 28557,
"s": 28539,
"text": "Java-text package"
},
{
"code": null,
"e": 28562,
"s": 28557,
"text": "Java"
},
{
"code": null,
"e": 28567,
"s": 28562,
"text": "Java"
},
{
"code": null,
"e": 28665,
"s": 28567,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28697,
"s": 28665,
"text": "Initialize an ArrayList in Java"
},
{
"code": null,
"e": 28716,
"s": 28697,
"text": "Interfaces in Java"
},
{
"code": null,
"e": 28734,
"s": 28716,
"text": "ArrayList in Java"
},
{
"code": null,
"e": 28766,
"s": 28734,
"text": "Multidimensional Arrays in Java"
},
{
"code": null,
"e": 28786,
"s": 28766,
"text": "Stack Class in Java"
},
{
"code": null,
"e": 28801,
"s": 28786,
"text": "Stream In Java"
},
{
"code": null,
"e": 28825,
"s": 28801,
"text": "Singleton Class in Java"
},
{
"code": null,
"e": 28837,
"s": 28825,
"text": "Set in Java"
},
{
"code": null,
"e": 28856,
"s": 28837,
"text": "Overriding in Java"
}
] |
A brief introduction to two data processing architectures β Lambda and Kappa for Big Data | by Iman Samizadeh, Ph.D. | Towards Data Science
|
Big Data, Internet of things (IoT), Machine learning models and various other modern systems are becoming an inevitable reality today. People from all walks of life have started to interact with data storages and servers as a part of their daily routine. Therefore we can say that dealing with big data in the best possible manner is becoming the main area of interest for businesses, scientists and individuals. For instance an application launched for achieving certain business goals will be more successful if it can efficiently handle the queries made by customers and serve their purpose well. Such applications need to interact with data storage, and in this article we'll try to explore two important data processing architectures that serve as the backbone of various enterprise applications, known as Lambda and Kappa.
The rapid growth of social media applications, cloud based systems, Internet of things and an unending spree of innovations has made it important for a developer or a data scientist to take well calculated decisions while launching, upgrading or troubleshooting an enterprise application. Although it has been widely accepted and understood that using a modular approach to build an application has multiple advantages and long term benefits, the pursuit for selecting the right data processing architecture still keeps putting question marks in front of many proposals related to existing and upcoming enterprise software. Although there are various data processing architectures being followed around the globe these days letβs investigate the Lambda and Kappa architectures in detail and find out what makes each of them special and in what circumstances one should be preferred over another.
Lambda architecture is a data processing technique that is capable of dealing with huge amounts of data in an efficient manner. The efficiency of this architecture becomes evident in the form of increased throughput, reduced latency and negligible errors. When we mention data processing here, we use the term to mean high-throughput, low-latency, near-real-time applications. Such an architecture also allows developers to define delta rules, in the form of code logic or natural language processing (NLP), in event-based data processing models to achieve robustness, automation and efficiency and to improve the data quality. Moreover, any change in the state of data is an event to the system, and as a matter of fact it is possible to issue a command, run a query, or carry out delta procedures as a response to events on the fly.
Event sourcing is the concept of using events to make predictions as well as to store the changes in a system in real time; a change in the state of the system, an update in a database, or any other event can be understood as a change. For instance, if someone interacts with a web page or a social network profile, events like a page view, a like, or an "Add as a Friend" request are triggering events that can be processed or enriched, and the resulting data stored in a database.
Data processing deals with event streams, and most enterprise software that follows Domain Driven Design uses the stream processing method to predict updates for the basic model and to store the distinct events that serve as a source for predictions in a live data system. To handle the numerous events occurring in a system, or delta processing, Lambda architecture enables data processing by introducing three distinct layers. Lambda architecture comprises the Batch Layer, the Speed Layer (also known as the Stream Layer) and the Serving Layer.
New data keeps coming as a feed to the data system. At every instance it is fed to the batch layer and speed layer simultaneously. Any new data stream that comes to batch layer of the data system is computed and processed on top of a Data Lake. When data gets stored in the data lake using databases such as in memory databases or long term persistent one like NoSQL based storages batch layer uses it to process the data using MapReduce or utilizing machine-learning (ML) to make predictions for the upcoming batch views.
The speed layer uses the fruit of the event sourcing done at the batch layer. The data streams processed in the batch layer result in updating the delta process, MapReduce job, or machine learning model, which is further used by the stream layer to process the new data fed to it. The speed layer provides outputs on the basis of this enrichment process and supports the serving layer in reducing the latency of responding to queries. As is obvious from its name, the speed layer has low latency because it deals with real-time data only and has less computational load.
The outputs from batch layer in the form of batch views and from speed layer in the form of near-real time views are forwarded to the serving layer which uses this data to cater the pending queries on ad-hoc basis.
Here is a basic diagram of what Lambda Architecture model would look like:
Let's translate that to a functional equation which defines any query in the big data domain. The symbols used in this equation are known as Lambda, and the name of the Lambda architecture is also coined from the same equation. This function is widely known to those who are familiar with the tidbits of big data analysis.
Query = Ξ» (Complete data) = Ξ» (live streaming data) * Ξ» (Stored data)
The equation means that all the data related queries can be catered in the Lambda architecture by combining the results from historical storage in the form of batches and live streaming with the help of speed layer.
Applications of Lambda Architecture
Lambda architecture can be deployed for those data processing enterprise models where:
User queries are required to be served on ad-hoc basis using the immutable data storage.
Quick responses are required and system should be capable of handling various updates in the form of new data streams.
None of the stored records shall be erased and it should allow addition of updates and new data to the database.
Lambda architecture can be considered as near real-time data processing architecture. As mentioned above, it can withstand the faults as well as allows scalability. It uses the functions of batch layer and stream layer and keeps adding new data to the main storage while ensuring that the existing data will remain intact. Companies like Twitter, Netflix, and Yahoo are using this architecture to meet the quality of service standards.
Batch layer of Lambda architecture manages historical data with the fault tolerant distributed storage which ensures low possibility of errors even if the system crashes.
It is a good balance of speed and reliability.
Fault tolerant and scalable architecture for data processing.
It can result in coding overhead due to involvement of comprehensive processing.
Re-processes every batch cycle which is not beneficial in certain scenarios.
A data modeled with Lambda architecture is difficult to migrate or reorganize.
In 2014 Jay Kreps started a discussion where he pointed out some discrepancies of Lambda architecture that further led the big data world to another alternate architecture that used less code resource and was capable of performing well in certain enterprise scenarios where using multi layered Lambda architecture seemed like extravagance.
Kappa Architecture cannot be taken as a substitute of Lambda architecture on the contrary it should be seen as an alternative to be used in those circumstances where active performance of batch layer is not necessary for meeting the standard quality of service. This architecture finds its applications in real-time processing of distinct events. Here is a basic diagram for the Kappa architecture that shows two layers system of operation for this data processing architecture.
Let's translate the operational sequencing of the Kappa architecture to a functional equation which defines any query in the big data domain.
Query = K (New Data) = K (Live streaming data)
The equation means that all the queries can be catered by applying the kappa function to the live streams of data at the speed layer. It also signifies that the stream processing occurs on the speed layer in the Kappa architecture.
Some variants of social network applications, devices connected to a cloud based monitoring system, Internet of things (IoT) use an optimized version of Lambda architecture which mainly uses the services of speed layer combined with streaming layer to process the data over the data lake.
Kappa architecture can be deployed for those data processing enterprise models where:
Multiple data events or queries are logged in a queue to be catered against a distributed file system storage or history.
The order of the events and queries is not predetermined. Stream processing platforms can interact with database at any time.
It is resilient and highly available as handling Terabytes of storage is required for each node of the system to support replication.
The above-mentioned data scenarios are handled by employing Apache Kafka, which is extremely fast, fault tolerant and horizontally scalable. It allows a better mechanism for governing the data streams. A balanced control of the stream processors and databases makes it possible for the applications to perform as per expectations. Kafka retains the ordered data for longer durations and caters the analogous queries by linking them to the appropriate position of the retained log. LinkedIn and some other applications use this flavor of big data processing and reap the benefit of retaining large amounts of data to cater those queries that are mere replicas of each other.
Kappa architecture can be used to develop data systems that are online learners and therefore don't need the batch layer.
Re-processing is required only when the code changes.
It can be deployed with fixed memory.
It can be used for horizontally scalable systems.
Fewer resources are required as the machine learning is being done on the real time basis.
Absence of batch layer might result in errors during data processing or while updating the database that requires having an exception manager to reprocess the data or reconciliation.
In short, the choice between the Lambda and Kappa architectures seems like a tradeoff. If you seek an architecture that is more reliable in updating the data lake as well as efficient in devising machine learning models to predict upcoming events in a robust manner, you should use the Lambda architecture, as it reaps the benefits of the batch layer and speed layer to ensure fewer errors and greater speed. On the other hand, if you want to deploy a big data architecture using less expensive hardware and require it to deal effectively with unique events occurring at runtime, then select the Kappa architecture for your real-time data processing needs.
|
[
{
"code": null,
"e": 999,
"s": 172,
"text": "Big Data, Internet of things (IoT), Machine learning models and various other modern systems are becoming an inevitable reality today. People from all walks of life have started to interact with data storages and servers as a part of their daily routine. Therefore we can say that dealing with big data in the best possible manner is becoming the main area of interest for businesses, scientists and individuals. For instance an application launched for achieving certain business goals will be more successful if it can efficiently handle the queries made by customers and serve their purpose well. Such applications need to interact with data storage and in this article weβll try to explore two important data processing architectures that serve as the backbone of various enterprise applications known as Lambda and Kappa."
},
{
"code": null,
"e": 1895,
"s": 999,
"text": "The rapid growth of social media applications, cloud based systems, Internet of things and an unending spree of innovations has made it important for a developer or a data scientist to take well calculated decisions while launching, upgrading or troubleshooting an enterprise application. Although it has been widely accepted and understood that using a modular approach to build an application has multiple advantages and long term benefits, the pursuit for selecting the right data processing architecture still keeps putting question marks in front of many proposals related to existing and upcoming enterprise software. Although there are various data processing architectures being followed around the globe these days letβs investigate the Lambda and Kappa architectures in detail and find out what makes each of them special and in what circumstances one should be preferred over another."
},
{
"code": null,
"e": 2752,
"s": 1895,
"text": "Lambda architecture is a data processing technique that is capable of dealing with huge amount of data in an efficient manner. The efficiency of this architecture becomes evident in the form of increased throughput, reduced latency and negligible errors. While we mention data processing we basically use this term to represent high throughput, low latency and aiming for near-real-time applications. Which also would allow the developers to define delta rules in the form of code logic or natural language processing (NLP) in event-based data processing models to achieve robustness, automation and efficiency and improve the data quality. Moreover, any change in the state of data is an event to the system and as a matter of fact it is possible to give a command, queried or expected to carry out delta procedures as a response to the events on the fly."
},
{
"code": null,
"e": 3222,
"s": 2752,
"text": "Event sourcing is a concept of using the events to make prediction as well as storing the changes in a system on the real time basis a change of state of a system, an update in the databases or an event can be understood as a change. For instance if someone interact with a web page or a social network profile, the events like page view, likes or Add as a Friend request etc... are triggering events that can be processed or enriched and the data stored in a database."
},
{
"code": null,
"e": 3762,
"s": 3222,
"text": "Data processing deals with the event streams and most of the enterprise software that follow the Domain Driven Design use the stream processing method to predict updates for the basic model and store the distinct events that serve as a source for predictions in a live data system. To handle numerous events occurring in a system or delta processing, Lambda architecture enabling data processing by introducing three distinct layers. Lambda architecture comprises of Batch Layer, Speed Layer (also known as Stream layer) and Serving Layer."
},
{
"code": null,
"e": 4285,
"s": 3762,
"text": "New data keeps coming as a feed to the data system. At every instance it is fed to the batch layer and speed layer simultaneously. Any new data stream that comes to batch layer of the data system is computed and processed on top of a Data Lake. When data gets stored in the data lake using databases such as in memory databases or long term persistent one like NoSQL based storages batch layer uses it to process the data using MapReduce or utilizing machine-learning (ML) to make predictions for the upcoming batch views."
},
{
"code": null,
"e": 4835,
"s": 4285,
"text": "The speed layer uses the fruit of event sourcing done at the batch layer. The data streams processed in the batch layer result in updating delta process or MapReduce or machine learning model which is further used by the stream layer to process the new data fed to it. Speed layer provides the outputs on the basis enrichment process and supports the serving layer to reduce the latency in responding the queries. As obvious from its name the speed layer has low latency because it deals with the real time data only and has less computational load."
},
{
"code": null,
"e": 5050,
"s": 4835,
"text": "The outputs from batch layer in the form of batch views and from speed layer in the form of near-real time views are forwarded to the serving layer which uses this data to cater the pending queries on ad-hoc basis."
},
{
"code": null,
"e": 5125,
"s": 5050,
"text": "Here is a basic diagram of what Lambda Architecture model would look like:"
},
{
"code": null,
"e": 5440,
"s": 5125,
"text": "Letβs translate that to a functional equation which defines any query in big data domain. The symbols used in this equation are known as Lambda and the name for the Lambda architecture is also coined from the same equation. This function is widely known to those who are familiar with tidbits of big data analysis."
},
{
"code": null,
"e": 5510,
"s": 5440,
"text": "Query = Ξ» (Complete data) = Ξ» (live streaming data) * Ξ» (Stored data)"
},
{
"code": null,
"e": 5726,
"s": 5510,
"text": "The equation means that all the data related queries can be catered in the Lambda architecture by combining the results from historical storage in the form of batches and live streaming with the help of speed layer."
},
{
"code": null,
"e": 5762,
"s": 5726,
"text": "Applications of Lambda Architecture"
},
{
"code": null,
"e": 5849,
"s": 5762,
"text": "Lambda architecture can be deployed for those data processing enterprise models where:"
},
{
"code": null,
"e": 5938,
"s": 5849,
"text": "User queries are required to be served on ad-hoc basis using the immutable data storage."
},
{
"code": null,
"e": 6057,
"s": 5938,
"text": "Quick responses are required and system should be capable of handling various updates in the form of new data streams."
},
{
"code": null,
"e": 6170,
"s": 6057,
"text": "None of the stored records shall be erased and it should allow addition of updates and new data to the database."
},
{
"code": null,
"e": 6606,
"s": 6170,
"text": "Lambda architecture can be considered as near real-time data processing architecture. As mentioned above, it can withstand the faults as well as allows scalability. It uses the functions of batch layer and stream layer and keeps adding new data to the main storage while ensuring that the existing data will remain intact. Companies like Twitter, Netflix, and Yahoo are using this architecture to meet the quality of service standards."
},
{
"code": null,
"e": 6777,
"s": 6606,
"text": "Batch layer of Lambda architecture manages historical data with the fault tolerant distributed storage which ensures low possibility of errors even if the system crashes."
},
{
"code": null,
"e": 6824,
"s": 6777,
"text": "It is a good balance of speed and reliability."
},
{
"code": null,
"e": 6886,
"s": 6824,
"text": "Fault tolerant and scalable architecture for data processing."
},
{
"code": null,
"e": 6967,
"s": 6886,
"text": "It can result in coding overhead due to involvement of comprehensive processing."
},
{
"code": null,
"e": 7044,
"s": 6967,
"text": "Re-processes every batch cycle which is not beneficial in certain scenarios."
},
{
"code": null,
"e": 7123,
"s": 7044,
"text": "A data modeled with Lambda architecture is difficult to migrate or reorganize."
},
{
"code": null,
"e": 7463,
"s": 7123,
"text": "In 2014 Jay Kreps started a discussion where he pointed out some discrepancies of Lambda architecture that further led the big data world to another alternate architecture that used less code resource and was capable of performing well in certain enterprise scenarios where using multi layered Lambda architecture seemed like extravagance."
},
{
"code": null,
"e": 7942,
"s": 7463,
"text": "Kappa Architecture cannot be taken as a substitute of Lambda architecture on the contrary it should be seen as an alternative to be used in those circumstances where active performance of batch layer is not necessary for meeting the standard quality of service. This architecture finds its applications in real-time processing of distinct events. Here is a basic diagram for the Kappa architecture that shows two layers system of operation for this data processing architecture."
},
{
"code": null,
"e": 8080,
"s": 7942,
"text": "Letβs translate the operational sequencing of the kappa architecture to a functional equation which defines any query in big data domain."
},
{
"code": null,
"e": 8127,
"s": 8080,
"text": "Query = K (New Data) = K (Live streaming data)"
},
{
"code": null,
"e": 8356,
"s": 8127,
"text": "The equation means that all the queries can be catered by applying kappa function to the live streams of data at the speed layer. It also signifies that that the stream processing occurs on the speed layer in kappa architecture."
},
{
"code": null,
"e": 8645,
"s": 8356,
"text": "Some variants of social network applications, devices connected to a cloud based monitoring system, Internet of things (IoT) use an optimized version of Lambda architecture which mainly uses the services of speed layer combined with streaming layer to process the data over the data lake."
},
{
"code": null,
"e": 8731,
"s": 8645,
"text": "Kappa architecture can be deployed for those data processing enterprise models where:"
},
{
"code": null,
"e": 8853,
"s": 8731,
"text": "Multiple data events or queries are logged in a queue to be catered against a distributed file system storage or history."
},
{
"code": null,
"e": 8979,
"s": 8853,
"text": "The order of the events and queries is not predetermined. Stream processing platforms can interact with database at any time."
},
{
"code": null,
"e": 9113,
"s": 8979,
"text": "It is resilient and highly available as handling Terabytes of storage is required for each node of the system to support replication."
},
{
"code": null,
"e": 9785,
"s": 9113,
"text": "The above mentioned data scenarios are handled by exhausting Apache Kafka which is extremely fast, fault tolerant and horizontally scalable. It allows a better mechanism for governing the data-streams. A balanced control on the stream processors and databases makes it possible for the applications to perform as per expectations. Kafka retains the ordered data for longer durations and caters the analogous queries by linking them to the appropriate position of the retained log. LinkedIn and some other applications use this flavor of big data processing and reap the benefit of retaining large amount of data to cater those queries that are mere replica of each other."
},
{
"code": null,
"e": 9907,
"s": 9785,
"text": "Kappa architecture can be used to develop data systems that are online learners and therefore donβt need the batch layer."
},
{
"code": null,
"e": 9961,
"s": 9907,
"text": "Re-processing is required only when the code changes."
},
{
"code": null,
"e": 9999,
"s": 9961,
"text": "It can be deployed with fixed memory."
},
{
"code": null,
"e": 10049,
"s": 9999,
"text": "It can be used for horizontally scalable systems."
},
{
"code": null,
"e": 10140,
"s": 10049,
"text": "Fewer resources are required as the machine learning is being done on the real time basis."
},
{
"code": null,
"e": 10323,
"s": 10140,
"text": "Absence of batch layer might result in errors during data processing or while updating the database that requires having an exception manager to reprocess the data or reconciliation."
}
] |
Changing the appearance of a Scrollbar in Tkinter (using ttk styles)
|
Scrollbars are used to wrap an amount of text or characters in a frame or window. It provides a text widget to contain as many characters as the user wants.
The Scrollbar can be of two types: Horizontal Scrollbar and Vertical Scrollbar.
The length of a scrollbar changes whenever the number of characters in the Text widget increases. We can configure the style of Scrollbar by using ttk.Scrollbar. Ttk provides many inbuilt features and attributes that can be used to configure the Scrollbar.
In this example, we will add a vertical scrollbar to a Text widget. We will use a ttk style theme to customize the look of the scrollbar. We have used the 'classic' theme here. Refer to this link for a complete list of ttk themes.
# Import the required libraries
# (the wildcard import brings Tk, Text, RIGHT, BOTH, TOP, X, CHAR and END
#  into scope; ttk supplies the themed Scrollbar and Style used below)
from tkinter import *
from tkinter import ttk
# Create an instance of Tkinter Frame
win = Tk()
# Set the geometry of Tkinter Frame
win.geometry("700x250")
# Switch to a ttk style/theme so the scrollbar colors can be customized;
# the 'classic' theme honors the background/border/arrow color options
style=ttk.Style()
style.theme_use('classic')
# "Vertical.TScrollbar" is the style name that targets every vertical ttk scrollbar
style.configure("Vertical.TScrollbar", background="green", bordercolor="red", arrowcolor="white")
# Create a vertical scrollbar
scrollbar = ttk.Scrollbar(win, orient='vertical')
scrollbar.pack(side=RIGHT, fill=BOTH)
# Add a Text Widget
# yscrollcommand=scrollbar.set keeps the scrollbar thumb in sync with the text view
text = Text(win, width=15, height=15, wrap=CHAR,
yscrollcommand=scrollbar.set)
# Fill the widget with sample content so scrolling is actually needed
# NOTE(review): the loop body's indentation appears lost in this scraped copy —
# the JSON record below retains "    text.insert(END, i)"; confirm before running
for i in range(1000):
text.insert(END, i)
text.pack(side=TOP, fill=X)
# Configure the scrollbar
# command=text.yview lets dragging the scrollbar scroll the text widget
scrollbar.config(command=text.yview)
win.mainloop()
Running the above code will display a window with a text widget and a customized vertical Scrollbar.
|
[
{
"code": null,
"e": 1219,
"s": 1062,
"text": "Scrollbars are used to wrap an amount of text or characters in a frame or window. It provides a text widget to contain as many characters as the user wants."
},
{
"code": null,
"e": 1299,
"s": 1219,
"text": "The Scrollbar can be of two types: Horizontal Scrollbar and Vertical Scrollbar."
},
{
"code": null,
"e": 1556,
"s": 1299,
"text": "The length of a scrollbar changes whenever the number of characters in the Text widget increases. We can configure the style of Scrollbar by using ttk.Scrollbar. Ttk provides many inbuilt features and attributes that can be used to configure the Scrollbar."
},
{
"code": null,
"e": 1781,
"s": 1556,
"text": "In this example, we will add a vertical scrollbar in a Text widget. We will use a ttk style theme to customize the look of the scrollbar. We have used here the 'classic' theme. Refer this link for a complete list ttk themes."
},
{
"code": null,
"e": 2488,
"s": 1781,
"text": "# Import the required libraries\nfrom tkinter import *\nfrom tkinter import ttk\n\n# Create an instance of Tkinter Frame\nwin = Tk()\n\n# Set the geometry of Tkinter Frame\nwin.geometry(\"700x250\")\n\nstyle=ttk.Style()\nstyle.theme_use('classic')\nstyle.configure(\"Vertical.TScrollbar\", background=\"green\", bordercolor=\"red\", arrowcolor=\"white\")\n\n# Create a vertical scrollbar\nscrollbar = ttk.Scrollbar(win, orient='vertical')\nscrollbar.pack(side=RIGHT, fill=BOTH)\n\n# Add a Text Widget\ntext = Text(win, width=15, height=15, wrap=CHAR,\nyscrollcommand=scrollbar.set)\n\nfor i in range(1000):\n text.insert(END, i)\n\ntext.pack(side=TOP, fill=X)\n\n# Configure the scrollbar\nscrollbar.config(command=text.yview)\n\nwin.mainloop()"
},
{
"code": null,
"e": 2589,
"s": 2488,
"text": "Running the above code will display a window with a text widget and a customized vertical Scrollbar."
}
] |
Loops in R (for, while, repeat) - GeeksforGeeks
|
21 Oct, 2021
In R programming, we require a control structure to run a block of code multiple times. Loops come in the class of the most fundamental and strong programming concepts. A loop is a control statement that allows multiple executions of a statement or a set of statements. The word βloopingβ means cycling or iterating.
A loop asks a query, in the loop structure. If the answer to that query requires an action, it will be executed. The same query is asked again and again until further action is taken. Any time the query is asked in the loop, it is known as an iteration of the loop. There are two components of a loop, the control statement, and the loop body. The control statement controls the execution of statements depending on the condition and the loop body consists of the set of statements to be executed.
In order to execute the identical lines of code numerous times in a program, a programmer can simply use a loop.
For Loop
While Loop
Repeat Loop
It is a type of control statement that enables one to easily construct a loop that has to run statements or a set of statements multiple times. For loop is commonly used to iterate over items of a sequence. It is an entry controlled loop, in this loop the test condition is tested first, then the body of the loop is executed, the loop body would not be executed if the test condition is false.
for (value in sequence)
{
statement
}
Below are some programs to illustrate the use of for loop in R programming.
Example 1: Program to display numbers from 1 to 5 using for loop in R.
R
# R program to demonstrate the use of for loop # using for loopfor (val in 1: 5){ # statement print(val)}
Output:
[1] 1
[1] 2
[1] 3
[1] 4
[1] 5
Here, for loop is iterated over a sequence having numbers from 1 to 5. In each iteration, each item of the sequence is displayed.
Example 2: Program to display days of a week.
R
# R program to illustrate# application of for loop # assigning strings to the vectorweek < - c('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday') # using for loop to iterate# over each string in the vectorfor (day in week){ # displaying each string in the vector print(day)}
Output:
[1] "Sunday"
[1] "Monday"
[1] "Tuesday"
[1] "Wednesday"
[1] "Thusrday"
[1] "Friday"
[1] "Saturday"
In the above program, initially, all the days(strings) of the week are assigned to the vector week. Then for loop is used to iterate over each string in a week. In each iteration, each day of the week is displayed.
It is a type of control statement which will run a statement or a set of statements repeatedly unless the given condition becomes false. It is also an entry controlled loop, in this loop the test condition is tested first, then the body of the loop is executed, the loop body would not be executed if the test condition is false.
while ( condition )
{
statement
}
Below are some programs to illustrate the use of the while loop in R programming.
Example 1: Program to display numbers from 1 to 5 using while loop in R.
R
# R program to demonstrate the use of while loop val = 1 # using while loopwhile (val <= 5){ # statements print(val) val = val + 1}
Output:
[1] 1
[1] 2
[1] 3
[1] 4
[1] 5
Initially, the variable value is initialized to 1. In each iteration of the while loop the condition is checked and the value of val is displayed and then it is incremented until it becomes 5 and the condition becomes false, the loop is terminated.
Example 2: Program to calculate factorial of a number.
R
# R program to illustrate# application of while loop # assigning value to the variable# whose factorial will be calculatedn < - 5 # assigning the factorial variable# and iteration variable to 1factorial < - 1i < - 1 # using while loopwhile (i <= n){ # multiplying the factorial variable # with the iteration variable factorial = factorial * i # incrementing the iteration variable i = i + 1} # displaying the factorialprint(factorial)
Output:
[1] 120
Here, at first, the variable n is assigned to 5 whose factorial is going to be calculated, then variable i and factorial are assigned to 1. i will be used for iterating over the loop, and factorial will be used for calculating the factorial. In each iteration of the loop, the condition is checked i.e. i should be less than or equal to 5, and after that factorial is multiplied with the value of i, then i is incremented. When i becomes 5, the loop is terminated and the factorial of 5 i.e. 120 is displayed beyond the scope of the loop.
It is a simple loop that will run the same statement or a group of statements repeatedly until the stop condition has been encountered. Repeat loop does not have any condition to terminate the loop, a programmer must specifically place a condition within the loopβs body and use the declaration of a break statement to terminate this loop. If no condition is present in the body of the repeat loop then it will iterate infinitely.
repeat
{
statement
if( condition )
{
break
}
}
To terminate the repeat loop, we use a jump statement that is the break keyword. Below are some programs to illustrate the use of repeat loops in R programming.
Example 1: Program to display numbers from 1 to 5 using repeat loop in R.
R
# R program to demonstrate the use of repeat loop val = 1 # using repeat looprepeat{ # statements print(val) val = val + 1 # checking stop condition if(val > 5) { # using break statement # to terminate the loop break }}
Output:
[1] 1
[1] 2
[1] 3
[1] 4
[1] 5
In the above program, the variable val is initialized to 1, then in each iteration of the repeat loop the value of val is displayed and then it is incremented until it becomes greater than 5. If the value of val becomes greater than 5 then break statement is used to terminate the loop.
Example 2: Program to display a statement five times.
R
# R program to illustrate# the application of repeat loop # initializing the iteration variable with 0i < - 0 # using repeat looprepeat{ # statement to be executed multiple times print("Geeks 4 geeks!") # incrementing the iteration variable i = i + 1 # checking the stop condition if (i == 5) { # using break statement # to terminate the loop break }}
Output:
[1] "Geeks 4 geeks!"
[1] "Geeks 4 geeks!"
[1] "Geeks 4 geeks!"
[1] "Geeks 4 geeks!"
[1] "Geeks 4 geeks!"
Here, initially the variable i is initialized with 0 then in each iteration of the repeat loop after printing Geeks 4 geeks! the value of i is incremented till it becomes 5 and the condition in the if statement becomes true then, the break statement is executed to terminate the repeat loop.
We use a jump statement in loops to terminate the loop at a particular iteration or to skip a particular iteration in the loop. The two most commonly used jump statements in loops are:
Break Statement: The break keyword is a jump statement that is used to terminate the loop at a particular iteration.
Example:
R
# R program to illustrate# the use of break statement # using for loop# to iterate over a sequencefor (val in 1: 5){ # checking condition if (val == 3) { # using break keyword break } # displaying items in the sequence print(val)}
Output:
[1] 1
[1] 2
In the above program, if the value of val becomes 3 then the break statement will be executed and the loop will terminate.
Next Statement: The next keyword is a jump statement which is used to skip a particular iteration in the loop.
Example:
R
# R program to illustrate# the use of next statement # using for loop# to iterate over the sequencefor (val in 1: 5){ # checking condition if (val == 3) { # using next keyword next } # displaying items in the sequence print(val)}
Output:
[1] 1
[1] 2
[1] 4
[1] 5
In the above program, if the value of Val becomes 3 then the next statement will be executed hence the current iteration of the loop will be skipped. So 3 is not displayed in the output.
As we can conclude from the above two programs the basic difference between the two jump statements is that the break statement terminates the loop and the next statement skips a particular iteration of the loop.
kumar_satyam
Picked
R Language
Write From Home
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Replace specific values in column in R DataFrame ?
Filter data by multiple conditions in R using Dplyr
Change Color of Bars in Barchart using ggplot2 in R
How to change Row Names of DataFrame in R ?
Group by function in R using Dplyr
Convert integer to string in Python
Convert string to integer in Python
How to set input type date in dd-mm-yyyy format using HTML ?
Python infinity
Matplotlib.pyplot.title() in Python
|
[
{
"code": null,
"e": 25814,
"s": 25786,
"text": "\n21 Oct, 2021"
},
{
"code": null,
"e": 26132,
"s": 25814,
"text": "In R programming, we require a control structure to run a block of code multiple times. Loops come in the class of the most fundamental and strong programming concepts. A loop is a control statement that allows multiple executions of a statement or a set of statements. The word βloopingβ means cycling or iterating. "
},
{
"code": null,
"e": 26631,
"s": 26132,
"text": "A loop asks a query, in the loop structure. If the answer to that query requires an action, it will be executed. The same query is asked again and again until further action is taken. Any time the query is asked in the loop, it is known as an iteration of the loop. There are two components of a loop, the control statement, and the loop body. The control statement controls the execution of statements depending on the condition and the loop body consists of the set of statements to be executed."
},
{
"code": null,
"e": 26745,
"s": 26631,
"text": "In order to execute the identical lines of code numerous times in a program, a programmer can simply use a loop. "
},
{
"code": null,
"e": 26754,
"s": 26745,
"text": "For Loop"
},
{
"code": null,
"e": 26765,
"s": 26754,
"text": "While Loop"
},
{
"code": null,
"e": 26777,
"s": 26765,
"text": "Repeat Loop"
},
{
"code": null,
"e": 27173,
"s": 26777,
"text": "It is a type of control statement that enables one to easily construct a loop that has to run statements or a set of statements multiple times. For loop is commonly used to iterate over items of a sequence. It is an entry controlled loop, in this loop the test condition is tested first, then the body of the loop is executed, the loop body would not be executed if the test condition is false. "
},
{
"code": null,
"e": 27213,
"s": 27173,
"text": "for (value in sequence)\n{\n statement\n}"
},
{
"code": null,
"e": 27289,
"s": 27213,
"text": "Below are some programs to illustrate the use of for loop in R programming."
},
{
"code": null,
"e": 27361,
"s": 27289,
"text": "Example 1: Program to display numbers from 1 to 5 using for loop in R. "
},
{
"code": null,
"e": 27363,
"s": 27361,
"text": "R"
},
{
"code": "# R program to demonstrate the use of for loop # using for loopfor (val in 1: 5){ # statement print(val)}",
"e": 27475,
"s": 27363,
"text": null
},
{
"code": null,
"e": 27484,
"s": 27475,
"text": "Output: "
},
{
"code": null,
"e": 27514,
"s": 27484,
"text": "[1] 1\n[1] 2\n[1] 3\n[1] 4\n[1] 5"
},
{
"code": null,
"e": 27645,
"s": 27514,
"text": "Here, for loop is iterated over a sequence having numbers from 1 to 5. In each iteration, each item of the sequence is displayed. "
},
{
"code": null,
"e": 27692,
"s": 27645,
"text": "Example 2: Program to display days of a week. "
},
{
"code": null,
"e": 27694,
"s": 27692,
"text": "R"
},
{
"code": "# R program to illustrate# application of for loop # assigning strings to the vectorweek < - c('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday') # using for loop to iterate# over each string in the vectorfor (day in week){ # displaying each string in the vector print(day)}",
"e": 28063,
"s": 27694,
"text": null
},
{
"code": null,
"e": 28072,
"s": 28063,
"text": "Output: "
},
{
"code": null,
"e": 28171,
"s": 28072,
"text": "[1] \"Sunday\"\n[1] \"Monday\"\n[1] \"Tuesday\"\n[1] \"Wednesday\"\n[1] \"Thusrday\"\n[1] \"Friday\"\n[1] \"Saturday\""
},
{
"code": null,
"e": 28387,
"s": 28171,
"text": "In the above program, initially, all the days(strings) of the week are assigned to the vector week. Then for loop is used to iterate over each string in a week. In each iteration, each day of the week is displayed. "
},
{
"code": null,
"e": 28718,
"s": 28387,
"text": "It is a type of control statement which will run a statement or a set of statements repeatedly unless the given condition becomes false. It is also an entry controlled loop, in this loop the test condition is tested first, then the body of the loop is executed, the loop body would not be executed if the test condition is false. "
},
{
"code": null,
"e": 28755,
"s": 28718,
"text": "while ( condition ) \n{\n statement\n}"
},
{
"code": null,
"e": 28837,
"s": 28755,
"text": "Below are some programs to illustrate the use of the while loop in R programming."
},
{
"code": null,
"e": 28911,
"s": 28837,
"text": "Example 1: Program to display numbers from 1 to 5 using while loop in R. "
},
{
"code": null,
"e": 28913,
"s": 28911,
"text": "R"
},
{
"code": "# R program to demonstrate the use of while loop val = 1 # using while loopwhile (val <= 5){ # statements print(val) val = val + 1}",
"e": 29054,
"s": 28913,
"text": null
},
{
"code": null,
"e": 29063,
"s": 29054,
"text": "Output: "
},
{
"code": null,
"e": 29093,
"s": 29063,
"text": "[1] 1\n[1] 2\n[1] 3\n[1] 4\n[1] 5"
},
{
"code": null,
"e": 29343,
"s": 29093,
"text": "Initially, the variable value is initialized to 1. In each iteration of the while loop the condition is checked and the value of val is displayed and then it is incremented until it becomes 5 and the condition becomes false, the loop is terminated. "
},
{
"code": null,
"e": 29399,
"s": 29343,
"text": "Example 2: Program to calculate factorial of a number. "
},
{
"code": null,
"e": 29401,
"s": 29399,
"text": "R"
},
{
"code": "# R program to illustrate# application of while loop # assigning value to the variable# whose factorial will be calculatedn < - 5 # assigning the factorial variable# and iteration variable to 1factorial < - 1i < - 1 # using while loopwhile (i <= n){ # multiplying the factorial variable # with the iteration variable factorial = factorial * i # incrementing the iteration variable i = i + 1} # displaying the factorialprint(factorial)",
"e": 29853,
"s": 29401,
"text": null
},
{
"code": null,
"e": 29862,
"s": 29853,
"text": "Output: "
},
{
"code": null,
"e": 29870,
"s": 29862,
"text": "[1] 120"
},
{
"code": null,
"e": 30410,
"s": 29870,
"text": "Here, at first, the variable n is assigned to 5 whose factorial is going to be calculated, then variable i and factorial are assigned to 1. i will be used for iterating over the loop, and factorial will be used for calculating the factorial. In each iteration of the loop, the condition is checked i.e. i should be less than or equal to 5, and after that factorial is multiplied with the value of i, then i is incremented. When i becomes 5, the loop is terminated and the factorial of 5 i.e. 120 is displayed beyond the scope of the loop. "
},
{
"code": null,
"e": 30841,
"s": 30410,
"text": "It is a simple loop that will run the same statement or a group of statements repeatedly until the stop condition has been encountered. Repeat loop does not have any condition to terminate the loop, a programmer must specifically place a condition within the loopβs body and use the declaration of a break statement to terminate this loop. If no condition is present in the body of the repeat loop then it will iterate infinitely."
},
{
"code": null,
"e": 30911,
"s": 30841,
"text": "repeat \n{ \n statement\n \n if( condition ) \n {\n break\n }\n}"
},
{
"code": null,
"e": 31072,
"s": 30911,
"text": "To terminate the repeat loop, we use a jump statement that is the break keyword. Below are some programs to illustrate the use of repeat loops in R programming."
},
{
"code": null,
"e": 31147,
"s": 31072,
"text": "Example 1: Program to display numbers from 1 to 5 using repeat loop in R. "
},
{
"code": null,
"e": 31149,
"s": 31147,
"text": "R"
},
{
"code": "# R program to demonstrate the use of repeat loop val = 1 # using repeat looprepeat{ # statements print(val) val = val + 1 # checking stop condition if(val > 5) { # using break statement # to terminate the loop break }}",
"e": 31412,
"s": 31149,
"text": null
},
{
"code": null,
"e": 31421,
"s": 31412,
"text": "Output: "
},
{
"code": null,
"e": 31451,
"s": 31421,
"text": "[1] 1\n[1] 2\n[1] 3\n[1] 4\n[1] 5"
},
{
"code": null,
"e": 31738,
"s": 31451,
"text": "In the above program, the variable val is initialized to 1, then in each iteration of the repeat loop the value of val is displayed and then it is incremented until it becomes greater than 5. If the value of val becomes greater than 5 then break statement is used to terminate the loop."
},
{
"code": null,
"e": 31793,
"s": 31738,
"text": "Example 2: Program to display a statement five times. "
},
{
"code": null,
"e": 31795,
"s": 31793,
"text": "R"
},
{
"code": "# R program to illustrate# the application of repeat loop # initializing the iteration variable with 0i < - 0 # using repeat looprepeat{ # statement to be executed multiple times print(\"Geeks 4 geeks!\") # incrementing the iteration variable i = i + 1 # checking the stop condition if (i == 5) { # using break statement # to terminate the loop break }}",
"e": 32194,
"s": 31795,
"text": null
},
{
"code": null,
"e": 32203,
"s": 32194,
"text": "Output: "
},
{
"code": null,
"e": 32308,
"s": 32203,
"text": "[1] \"Geeks 4 geeks!\"\n[1] \"Geeks 4 geeks!\"\n[1] \"Geeks 4 geeks!\"\n[1] \"Geeks 4 geeks!\"\n[1] \"Geeks 4 geeks!\""
},
{
"code": null,
"e": 32601,
"s": 32308,
"text": "Here, initially the variable i is initialized with 0 then in each iteration of the repeat loop after printing Geeks 4 geeks! the value of i is incremented till it becomes 5 and the condition in the if statement becomes true then, the break statement is executed to terminate the repeat loop. "
},
{
"code": null,
"e": 32787,
"s": 32601,
"text": "We use a jump statement in loops to terminate the loop at a particular iteration or to skip a particular iteration in the loop. The two most commonly used jump statements in loops are: "
},
{
"code": null,
"e": 32904,
"s": 32787,
"text": "Break Statement: The break keyword is a jump statement that is used to terminate the loop at a particular iteration."
},
{
"code": null,
"e": 32913,
"s": 32904,
"text": "Example:"
},
{
"code": null,
"e": 32915,
"s": 32913,
"text": "R"
},
{
"code": "# R program to illustrate# the use of break statement # using for loop# to iterate over a sequencefor (val in 1: 5){ # checking condition if (val == 3) { # using break keyword break } # displaying items in the sequence print(val)}",
"e": 33179,
"s": 32915,
"text": null
},
{
"code": null,
"e": 33188,
"s": 33179,
"text": "Output: "
},
{
"code": null,
"e": 33200,
"s": 33188,
"text": "[1] 1\n[1] 2"
},
{
"code": null,
"e": 33323,
"s": 33200,
"text": "In the above program, if the value of val becomes 3 then the break statement will be executed and the loop will terminate."
},
{
"code": null,
"e": 33434,
"s": 33323,
"text": "Next Statement: The next keyword is a jump statement which is used to skip a particular iteration in the loop."
},
{
"code": null,
"e": 33444,
"s": 33434,
"text": "Example: "
},
{
"code": null,
"e": 33446,
"s": 33444,
"text": "R"
},
{
"code": "# R program to illustrate# the use of next statement # using for loop# to iterate over the sequencefor (val in 1: 5){ # checking condition if (val == 3) { # using next keyword next } # displaying items in the sequence print(val)}",
"e": 33709,
"s": 33446,
"text": null
},
{
"code": null,
"e": 33718,
"s": 33709,
"text": "Output: "
},
{
"code": null,
"e": 33742,
"s": 33718,
"text": "[1] 1\n[1] 2\n[1] 4\n[1] 5"
},
{
"code": null,
"e": 33929,
"s": 33742,
"text": "In the above program, if the value of Val becomes 3 then the next statement will be executed hence the current iteration of the loop will be skipped. So 3 is not displayed in the output."
},
{
"code": null,
"e": 34142,
"s": 33929,
"text": "As we can conclude from the above two programs the basic difference between the two jump statements is that the break statement terminates the loop and the next statement skips a particular iteration of the loop."
},
{
"code": null,
"e": 34155,
"s": 34142,
"text": "kumar_satyam"
},
{
"code": null,
"e": 34162,
"s": 34155,
"text": "Picked"
},
{
"code": null,
"e": 34173,
"s": 34162,
"text": "R Language"
},
{
"code": null,
"e": 34189,
"s": 34173,
"text": "Write From Home"
},
{
"code": null,
"e": 34287,
"s": 34189,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 34345,
"s": 34287,
"text": "How to Replace specific values in column in R DataFrame ?"
},
{
"code": null,
"e": 34397,
"s": 34345,
"text": "Filter data by multiple conditions in R using Dplyr"
},
{
"code": null,
"e": 34449,
"s": 34397,
"text": "Change Color of Bars in Barchart using ggplot2 in R"
},
{
"code": null,
"e": 34493,
"s": 34449,
"text": "How to change Row Names of DataFrame in R ?"
},
{
"code": null,
"e": 34528,
"s": 34493,
"text": "Group by function in R using Dplyr"
},
{
"code": null,
"e": 34564,
"s": 34528,
"text": "Convert integer to string in Python"
},
{
"code": null,
"e": 34600,
"s": 34564,
"text": "Convert string to integer in Python"
},
{
"code": null,
"e": 34661,
"s": 34600,
"text": "How to set input type date in dd-mm-yyyy format using HTML ?"
},
{
"code": null,
"e": 34677,
"s": 34661,
"text": "Python infinity"
}
] |
Lexicographically smallest rotated sequence | Set 2 - GeeksforGeeks
|
28 Apr, 2021
Write code to find lexicographic minimum in a circular array, e.g. for the array BCABDADAB, the lexicographic minimum is ABBCABDADInput Constraint: 1 < n < 1000 Examples:
Input: GEEKSQUIZ
Output: EEKSQUIZG
Input: GFG
Output: FGG
Input : CAPABCQ
Output : ABCQCAP
We have discussed a O(n2Logn) solution in Lexicographically minimum string rotation | Set 1. Here we need to find the starting index of minimum rotation and then print the rotation.
1) Initially assume 0 to be current min
starting index.
2) Loop through i = 1 to n-1.
a) For each i compare sequence starting
at i with current min starting index
b) If sequence starting at i is lexicographically
smaller, update current min starting
index.
Here is pseudo-code for algorithm
function findIndexForSmallestSequence(S, n):
result = 0
for i = 1:n-1
if (sequence beginning at i <
sequence beginning at result)
result = i
end if
end for
return result
Here is implementation of above algorithm.
C++
Java
Python 3
C#
PHP
Javascript
// C++ program to find lexicographically// smallest sequence with rotations.#include <iostream>using namespace std; // Function to compare lexicographically// two sequence with different starting// indexes. It returns true if sequence// beginning with y is lexicographically// greater.bool compareSeq(char S[], int x, int y, int n){ for (int i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true;} // Function to find starting index// of lexicographically smallest sequenceint smallestSequence(char S[], int n){ int index = 0; for (int i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index;} // Function to print lexicographically// smallest sequencevoid printSmallestSequence(char S[], int n){ int starting_index = smallestSequence(S, n); for (int i = 0; i < n; i++) cout << S[(starting_index + i) % n];} // driver codeint main(){ char S[] = "DCACBCAA"; int n = 8; printSmallestSequence(S, n); return 0;}
// Java program to find lexicographically// smallest sequence with rotations.import java.util.*;import java.lang.*;import java.io.*; /* Name of the class */class LexoSmallest { // Function to compare lexicographically // two sequence with different starting // indexes. It returns true if sequence // beginning with y is lexicographically // greater. static boolean compareSeq(char[] S, int x, int y, int n) { for (int i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true; } // Function to find starting index // of lexicographically smallest sequence static int smallestSequence(char[] S, int n) { int index = 0; for (int i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index; } // Function to print lexicographically // smallest sequence static void printSmallestSequence(String str, int n) { char[] S = str.toCharArray(); int starting_index = smallestSequence(S, n); for (int i = 0; i < n; i++) System.out.print(S[(starting_index + i) % n]); } // driver code public static void main(String[] args) { String S = "DCACBCAA"; int n = 8; printSmallestSequence(S, n); }}// This code is contributed by Mr Somesh Awasthi
# Python 3 program to find lexicographically# smallest sequence with rotations. # Function to compare lexicographically# two sequence with different starting# indexes. It returns true if sequence# beginning with y is lexicographically# greater.import copy def printSmallestSequence(s): m = copy.copy(s) for i in range(len(s) - 1): if m > s[i:] + s[:i]: m = s[i:] + s[:i] return m #Driver Codeif __name__ == '__main__': st = 'DCACBCAA' print(printSmallestSequence(st)) # This code is contributed by Koushik Reddy B
// C# program to find lexicographically// smallest sequence with rotations.using System; class LexoSmallest { // Function to compare lexicographically // two sequence with different starting // indexes. It returns true if sequence // beginning with y is lexicographically // greater. static bool compareSeq(string S, int x, int y, int n) { for (int i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true; } // Function to find starting index // of lexicographically smallest sequence static int smallestSequence(string S, int n) { int index = 0; for (int i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index; } // Function to print lexicographically // smallest sequence static void printSmallestSequence(string str, int n) { // char[] S=str.toCharArray(); int starting_index = smallestSequence(str, n); for (int i = 0; i < n; i++) Console.Write(str[(starting_index + i) % n]); } // driver code public static void Main() { string S = "DCACBCAA"; int n = 8; printSmallestSequence(S, n); }} // This code is contributed by vt_m.
<?php// PHP program to find lexicographically// smallest sequence with rotations. // Function to compare lexicographically// two sequence with different starting// indexes. It returns true if sequence// beginning with y is lexicographically// greater.function compareSeq($S, $x, $y, $n){ for($i = 0; $i < $n; $i++) { if ($S[$x] < $S[$y]) return false; else if ($S[$x] > $S[$y]) return true; $x = ($x + 1) % $n; $y = ($y + 1) % $n; } return true;} // Function to find starting index// of lexicographically smallest// sequencefunction smallestSequence($S, $n){ $index = 0; for ( $i = 1; $i < $n; $i++) // if new sequence is smaller if (compareSeq($S, $index, $i, $n)) // change index of current min $index = $i; return $index;} // Function to print lexicographically// smallest sequencefunction printSmallestSequence($S, $n){ $starting_index = smallestSequence($S, $n); for ($i = 0; $i < $n; $i++) echo $S[($starting_index + $i) % $n];} // Driver Code $S= "DCACBCAA"; $n = 8; printSmallestSequence($S, $n); // This code is contributed by Ajit.?>
<script>// Javascript program to find lexicographically// smallest sequence with rotations. // Function to compare lexicographically // two sequence with different starting // indexes. It returns true if sequence // beginning with y is lexicographically // greater. function compareSeq(S,x,y,n) { for (let i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true; } // Function to find starting index // of lexicographically smallest sequence function smallestSequence(S,n) { let index = 0; for (let i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index; } // Function to print lexicographically // smallest sequence function printSmallestSequence(str,n) { let S = str.split(""); let starting_index = smallestSequence(S, n); for (let i = 0; i < n; i++) document.write(S[(starting_index + i) % n]); } // driver code let S = "DCACBCAA"; let n = 8; printSmallestSequence(S, n); // This code is contributed by avanitrachhadiya2155 </script>
Output:
AADCACBC
Time Complexity : O(n^2) Auxiliary Space : O(1)This article is contributed by Pratik Chhajer. If you like GeeksforGeeks and would like to contribute, you can also write an article using contribute.geeksforgeeks.org or mail your article to contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
vt_m
jit_t
ukasp
KOUSHIKREDDY2
avanitrachhadiya2155
lexicographic-ordering
rotation
Strings
Strings
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Check for Balanced Brackets in an expression (well-formedness) using Stack
Python program to check if a string is palindrome or not
KMP Algorithm for Pattern Searching
Different methods to reverse a string in C/C++
Array of Strings in C++ (5 Different Ways to Create)
Convert string to char array in C++
Check whether two strings are anagram of each other
Longest Palindromic Substring | Set 1
Caesar Cipher in Cryptography
Top 50 String Coding Problems for Interviews
|
[
{
"code": null,
"e": 26553,
"s": 26525,
"text": "\n28 Apr, 2021"
},
{
"code": null,
"e": 26726,
"s": 26553,
"text": "Write code to find lexicographic minimum in a circular array, e.g. for the array BCABDADAB, the lexicographic minimum is ABBCABDADInput Constraint: 1 < n < 1000 Examples: "
},
{
"code": null,
"e": 26822,
"s": 26726,
"text": "Input: GEEKSQUIZ\nOutput: EEKSQUIZG\n\nInput: GFG\nOutput: FGG\n\nInput : CAPABCQ\nOutput : ABCQCAP"
},
{
"code": null,
"e": 27007,
"s": 26824,
"text": "We have discussed a O(n2Logn) solution in Lexicographically minimum string rotation | Set 1. Here we need to find the starting index of minimum rotation and then print the rotation. "
},
{
"code": null,
"e": 27295,
"s": 27007,
"text": "1) Initially assume 0 to be current min \n starting index.\n2) Loop through i = 1 to n-1.\n a) For each i compare sequence starting \n at i with current min starting index\n b) If sequence starting at i is lexicographically \n smaller, update current min starting \n index."
},
{
"code": null,
"e": 27331,
"s": 27295,
"text": "Here is pseudo-code for algorithm "
},
{
"code": null,
"e": 27561,
"s": 27331,
"text": "function findIndexForSmallestSequence(S, n):\n result = 0\n for i = 1:n-1\n if (sequence beginning at i < \n sequence beginning at result)\n result = i\n end if\n end for\n return result"
},
{
"code": null,
"e": 27606,
"s": 27561,
"text": "Here is implementation of above algorithm. "
},
{
"code": null,
"e": 27610,
"s": 27606,
"text": "C++"
},
{
"code": null,
"e": 27615,
"s": 27610,
"text": "Java"
},
{
"code": null,
"e": 27624,
"s": 27615,
"text": "Python 3"
},
{
"code": null,
"e": 27627,
"s": 27624,
"text": "C#"
},
{
"code": null,
"e": 27631,
"s": 27627,
"text": "PHP"
},
{
"code": null,
"e": 27642,
"s": 27631,
"text": "Javascript"
},
{
"code": "// C++ program to find lexicographically// smallest sequence with rotations.#include <iostream>using namespace std; // Function to compare lexicographically// two sequence with different starting// indexes. It returns true if sequence// beginning with y is lexicographically// greater.bool compareSeq(char S[], int x, int y, int n){ for (int i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true;} // Function to find starting index// of lexicographically smallest sequenceint smallestSequence(char S[], int n){ int index = 0; for (int i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index;} // Function to print lexicographically// smallest sequencevoid printSmallestSequence(char S[], int n){ int starting_index = smallestSequence(S, n); for (int i = 0; i < n; i++) cout << S[(starting_index + i) % n];} // driver codeint main(){ char S[] = \"DCACBCAA\"; int n = 8; printSmallestSequence(S, n); return 0;}",
"e": 28843,
"s": 27642,
"text": null
},
{
"code": "// Java program to find lexicographically// smallest sequence with rotations.import java.util.*;import java.lang.*;import java.io.*; /* Name of the class */class LexoSmallest { // Function to compare lexicographically // two sequence with different starting // indexes. It returns true if sequence // beginning with y is lexicographically // greater. static boolean compareSeq(char[] S, int x, int y, int n) { for (int i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true; } // Function to find starting index // of lexicographically smallest sequence static int smallestSequence(char[] S, int n) { int index = 0; for (int i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index; } // Function to print lexicographically // smallest sequence static void printSmallestSequence(String str, int n) { char[] S = str.toCharArray(); int starting_index = smallestSequence(S, n); for (int i = 0; i < n; i++) System.out.print(S[(starting_index + i) % n]); } // driver code public static void main(String[] args) { String S = \"DCACBCAA\"; int n = 8; printSmallestSequence(S, n); }}// This code is contributed by Mr Somesh Awasthi",
"e": 30417,
"s": 28843,
"text": null
},
{
"code": "# Python 3 program to find lexicographically# smallest sequence with rotations. # Function to compare lexicographically# two sequence with different starting# indexes. It returns true if sequence# beginning with y is lexicographically# greater.import copy def printSmallestSequence(s): m = copy.copy(s) for i in range(len(s) - 1): if m > s[i:] + s[:i]: m = s[i:] + s[:i] return m #Driver Codeif __name__ == '__main__': st = 'DCACBCAA' print(printSmallestSequence(st)) # This code is contributed by Koushik Reddy B",
"e": 30969,
"s": 30417,
"text": null
},
{
"code": "// C# program to find lexicographically// smallest sequence with rotations.using System; class LexoSmallest { // Function to compare lexicographically // two sequence with different starting // indexes. It returns true if sequence // beginning with y is lexicographically // greater. static bool compareSeq(string S, int x, int y, int n) { for (int i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true; } // Function to find starting index // of lexicographically smallest sequence static int smallestSequence(string S, int n) { int index = 0; for (int i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index; } // Function to print lexicographically // smallest sequence static void printSmallestSequence(string str, int n) { // char[] S=str.toCharArray(); int starting_index = smallestSequence(str, n); for (int i = 0; i < n; i++) Console.Write(str[(starting_index + i) % n]); } // driver code public static void Main() { string S = \"DCACBCAA\"; int n = 8; printSmallestSequence(S, n); }} // This code is contributed by vt_m.",
"e": 32452,
"s": 30969,
"text": null
},
{
"code": "<?php// PHP program to find lexicographically// smallest sequence with rotations. // Function to compare lexicographically// two sequence with different starting// indexes. It returns true if sequence// beginning with y is lexicographically// greater.function compareSeq($S, $x, $y, $n){ for($i = 0; $i < $n; $i++) { if ($S[$x] < $S[$y]) return false; else if ($S[$x] > $S[$y]) return true; $x = ($x + 1) % $n; $y = ($y + 1) % $n; } return true;} // Function to find starting index// of lexicographically smallest// sequencefunction smallestSequence($S, $n){ $index = 0; for ( $i = 1; $i < $n; $i++) // if new sequence is smaller if (compareSeq($S, $index, $i, $n)) // change index of current min $index = $i; return $index;} // Function to print lexicographically// smallest sequencefunction printSmallestSequence($S, $n){ $starting_index = smallestSequence($S, $n); for ($i = 0; $i < $n; $i++) echo $S[($starting_index + $i) % $n];} // Driver Code $S= \"DCACBCAA\"; $n = 8; printSmallestSequence($S, $n); // This code is contributed by Ajit.?>",
"e": 33632,
"s": 32452,
"text": null
},
{
"code": "<script>// Javascript program to find lexicographically// smallest sequence with rotations. // Function to compare lexicographically // two sequence with different starting // indexes. It returns true if sequence // beginning with y is lexicographically // greater. function compareSeq(S,x,y,n) { for (let i = 0; i < n; i++) { if (S[x] < S[y]) return false; else if (S[x] > S[y]) return true; x = (x + 1) % n; y = (y + 1) % n; } return true; } // Function to find starting index // of lexicographically smallest sequence function smallestSequence(S,n) { let index = 0; for (let i = 1; i < n; i++) // if new sequence is smaller if (compareSeq(S, index, i, n)) // change index of current min index = i; return index; } // Function to print lexicographically // smallest sequence function printSmallestSequence(str,n) { let S = str.split(\"\"); let starting_index = smallestSequence(S, n); for (let i = 0; i < n; i++) document.write(S[(starting_index + i) % n]); } // driver code let S = \"DCACBCAA\"; let n = 8; printSmallestSequence(S, n); // This code is contributed by avanitrachhadiya2155 </script>",
"e": 35044,
"s": 33632,
"text": null
},
{
"code": null,
"e": 35054,
"s": 35044,
"text": "Output: "
},
{
"code": null,
"e": 35063,
"s": 35054,
"text": "AADCACBC"
},
{
"code": null,
"e": 35537,
"s": 35063,
"text": "Time Complexity : O(n^2) Auxiliary Space : O(1)This article is contributed by Pratik Chhajer. If you like GeeksforGeeks and would like to contribute, you can also write an article using contribute.geeksforgeeks.org or mail your article to contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. "
},
{
"code": null,
"e": 35542,
"s": 35537,
"text": "vt_m"
},
{
"code": null,
"e": 35548,
"s": 35542,
"text": "jit_t"
},
{
"code": null,
"e": 35554,
"s": 35548,
"text": "ukasp"
},
{
"code": null,
"e": 35568,
"s": 35554,
"text": "KOUSHIKREDDY2"
},
{
"code": null,
"e": 35589,
"s": 35568,
"text": "avanitrachhadiya2155"
},
{
"code": null,
"e": 35612,
"s": 35589,
"text": "lexicographic-ordering"
},
{
"code": null,
"e": 35621,
"s": 35612,
"text": "rotation"
},
{
"code": null,
"e": 35629,
"s": 35621,
"text": "Strings"
},
{
"code": null,
"e": 35637,
"s": 35629,
"text": "Strings"
},
{
"code": null,
"e": 35735,
"s": 35637,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 35810,
"s": 35735,
"text": "Check for Balanced Brackets in an expression (well-formedness) using Stack"
},
{
"code": null,
"e": 35867,
"s": 35810,
"text": "Python program to check if a string is palindrome or not"
},
{
"code": null,
"e": 35903,
"s": 35867,
"text": "KMP Algorithm for Pattern Searching"
},
{
"code": null,
"e": 35950,
"s": 35903,
"text": "Different methods to reverse a string in C/C++"
},
{
"code": null,
"e": 36003,
"s": 35950,
"text": "Array of Strings in C++ (5 Different Ways to Create)"
},
{
"code": null,
"e": 36039,
"s": 36003,
"text": "Convert string to char array in C++"
},
{
"code": null,
"e": 36091,
"s": 36039,
"text": "Check whether two strings are anagram of each other"
},
{
"code": null,
"e": 36129,
"s": 36091,
"text": "Longest Palindromic Substring | Set 1"
},
{
"code": null,
"e": 36159,
"s": 36129,
"text": "Caesar Cipher in Cryptography"
}
] |
Python - scipy.fft.dct() method - GeeksforGeeks
|
01 Oct, 2020
With the help of scipy.fft.dct() method, we can compute the discrete cosine transform by selecting different types of sequences and return the transformed array by using this method.
Syntax :
scipy.fft.dct(x, type=2)
Return value: It will return the transformed array.
Example #1: In this example, we can see that by using scipy.fft.dct() method, we are able to get the discrete cosine transform by selecting different types of sequences by default itβs 2.
Python3
# import scipyfrom scipy import fft # Using scipy.fft.dct() methodgfg = fft.dct([1, 2, 3, 4]) print(gfg)
Output :
[20.00000000 -6.30864406 0.00000000 -0.44834153]
Example #2 :
Python3
# import scipyfrom scipy import fft # Using scipy.fft.dct() methodgfg = fft.dct([-6, 5, -4, 3, -2, 1], 3) print(gfg)
Output :
[ -0.50866619 -0.58578644 -0.79439535 -1.34919819 -3.41421356
-29.34774027]
Python-scipy
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Install PIP on Windows ?
Check if element exists in list in Python
How To Convert Python Dictionary To JSON?
How to drop one or multiple columns in Pandas Dataframe
Python Classes and Objects
Python | Get unique values from a list
Python | os.path.join() method
Create a directory in Python
Defaultdict in Python
Python | Pandas dataframe.groupby()
|
[
{
"code": null,
"e": 25561,
"s": 25533,
"text": "\n01 Oct, 2020"
},
{
"code": null,
"e": 25744,
"s": 25561,
"text": "With the help of scipy.fft.dct() method, we can compute the discrete cosine transform by selecting different types of sequences and return the transformed array by using this method."
},
{
"code": null,
"e": 25754,
"s": 25744,
"text": "Syntax : "
},
{
"code": null,
"e": 25780,
"s": 25754,
"text": "scipy.fft.dct(x, type=2)\n"
},
{
"code": null,
"e": 25832,
"s": 25780,
"text": "Return value: It will return the transformed array."
},
{
"code": null,
"e": 26020,
"s": 25832,
"text": "Example #1: In this example, we can see that by using scipy.fft.dct() method, we are able to get the discrete cosine transform by selecting different types of sequences by default itβs 2."
},
{
"code": null,
"e": 26028,
"s": 26020,
"text": "Python3"
},
{
"code": "# import scipyfrom scipy import fft # Using scipy.fft.dct() methodgfg = fft.dct([1, 2, 3, 4]) print(gfg)",
"e": 26135,
"s": 26028,
"text": null
},
{
"code": null,
"e": 26144,
"s": 26135,
"text": "Output :"
},
{
"code": null,
"e": 26197,
"s": 26144,
"text": "[20.00000000 -6.30864406 0.00000000 -0.44834153]\n"
},
{
"code": null,
"e": 26210,
"s": 26197,
"text": "Example #2 :"
},
{
"code": null,
"e": 26218,
"s": 26210,
"text": "Python3"
},
{
"code": "# import scipyfrom scipy import fft # Using scipy.fft.dct() methodgfg = fft.dct([-6, 5, -4, 3, -2, 1], 3) print(gfg)",
"e": 26337,
"s": 26218,
"text": null
},
{
"code": null,
"e": 26346,
"s": 26337,
"text": "Output :"
},
{
"code": null,
"e": 26428,
"s": 26346,
"text": "[ -0.50866619 -0.58578644 -0.79439535 -1.34919819 -3.41421356\n -29.34774027]\n"
},
{
"code": null,
"e": 26441,
"s": 26428,
"text": "Python-scipy"
},
{
"code": null,
"e": 26448,
"s": 26441,
"text": "Python"
},
{
"code": null,
"e": 26546,
"s": 26448,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26578,
"s": 26546,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 26620,
"s": 26578,
"text": "Check if element exists in list in Python"
},
{
"code": null,
"e": 26662,
"s": 26620,
"text": "How To Convert Python Dictionary To JSON?"
},
{
"code": null,
"e": 26718,
"s": 26662,
"text": "How to drop one or multiple columns in Pandas Dataframe"
},
{
"code": null,
"e": 26745,
"s": 26718,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 26784,
"s": 26745,
"text": "Python | Get unique values from a list"
},
{
"code": null,
"e": 26815,
"s": 26784,
"text": "Python | os.path.join() method"
},
{
"code": null,
"e": 26844,
"s": 26815,
"text": "Create a directory in Python"
},
{
"code": null,
"e": 26866,
"s": 26844,
"text": "Defaultdict in Python"
}
] |
Addition of two numbers without carry - GeeksforGeeks
|
10 May, 2022
You are given two positive number n and m. You have to find simply addition of both number but with a given condition that there is not any carry system in this addition. That is no carry is added at higher MSBs.Examples :
Input : m = 456, n = 854
Output : 200
Input : m = 456, n = 4
Output : 450
Algorithm :
Input n, m while(n||m)
{
// Add each bits
bit_sum = (n%10) + (m%10);
// Neglect carry
bit_sum %= 10;
// Update result
// multiplier to maintain place value
res = (bit_sum * multiplier) + res;
n /= 10;
m /= 10;
// Update multiplier
multiplier *=10;
} print res
Approach : To solve this problem we will need the bit by bit addition of number where we start adding two number from right most bit (LSB) and add integers from both numbers with same position. Also we will neglect carry at each position so that carry will not affect further higher bit position. Start adding both numbers bit by bit and for each bit take sum of integers then neglect their carry by taking modulo of bit_sum by 10 further add bit_sum to res by multiplying bit_sum with a multiplier specifying place value. (Multiplier got incremented 10 times on each iteration.) Below is the implementation of above approach :
C++
Java
Python3
C#
PHP
Javascript
// CPP program for special// addition of two number#include <bits/stdc++.h>using namespace std; int xSum(int n, int m){ // variable to store result int res = 0; // variable to maintain // place value int multiplier = 1; // variable to maintain // each digit sum int bit_sum; // Add numbers till each // number become zero while (n || m) { // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n /= 10; m /= 10; // Update multiplier multiplier *= 10; } return res;} // Driver programint main(){ int n = 8458; int m = 8732; cout << xSum(n, m); return 0;}
// Java program for special// addition of two numberimport java.util.*;import java.lang.*; public class GfG { public static int xSum(int n, int m) { int res = 0; int multiplier = 1; int bit_sum; // Add numbers till each // number become zero while (true) { if(n==0 && m==0) break; // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n /= 10; m /= 10; // Update multiplier multiplier *= 10; } return res; } // Driver function public static void main(String args[]) { int n = 8458; int m = 8732; System.out.println(xSum(n, m)); }}/* This code is contributed by Sagar Shukla */
# Python3 program for special# addition of two numberimport math def xSum(n, m) : # variable to # store result res = 0 # variable to maintain # place value multiplier = 1 # variable to maintain # each digit sum bit_sum = 0 # Add numbers till each # number become zero while (n or m) : # Add each bits bit_sum = ((n % 10) + (m % 10)) # Neglect carry bit_sum = bit_sum % 10 # Update result res = (bit_sum * multiplier) + res n = math.floor(n / 10) m = math.floor(m / 10) # Update multiplier multiplier = multiplier * 10 return res # Driver coden = 8458m = 8732print (xSum(n, m)) # This code is contributed by# Manish Shaw(manishshaw1)
// C# program for special// addition of two numberusing System; public class GfG { public static int xSum(int n, int m) { int res = 0; int multiplier = 1; int bit_sum; // Add numbers till each // number become zero while (true) { // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n /= 10; m /= 10; // Update multiplier multiplier *= 10; if (n == 0) break; if (m == 0) break; } return res; } // Driver function public static void Main() { int n = 8458; int m = 8732; Console.WriteLine(xSum(n, m)); }} /* This code is contributed by Vt_m */
<?php// php program for special// addition of two number function xSum($n, $m){ // variable to store result $res = 0; // variable to maintain // place value $multiplier = 1; // variable to maintain // each digit sum $bit_sum; // Add numbers till each // number become zero while ($n || $m) { // Add each bits $bit_sum = ($n % 10) + ($m % 10); // Neglect carry $bit_sum %= 10; // Update result $res = ($bit_sum * $multiplier) + $res; $n =floor($n / 10); $m =floor($m / 10); // Update multiplier $multiplier *= 10; } return $res;} // Driver code $n = 8458; $m = 8732; echo xSum($n, $m); //This code is contributed by mits?>
<script> // Javascript program for special// addition of two numberfunction xSum(n, m){ // variable to store result var res = 0; // variable to maintain // place value var multiplier = 1; // variable to maintain // each digit sum var bit_sum; // Add numbers till each // number become zero while (n || m) { // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n = parseInt(n / 10); m = parseInt(m / 10); // Update multiplier multiplier *= 10; } return res;} // Driver programvar n = 8458;var m = 8732;document.write(xSum(n, m)); // This code is contributed by noob2000.</script>
Output :
6180
Mithun Kumar
manishshaw1
offbeat
noob2000
simmytarika5
sagartomar9927
Bit Magic
Bit Magic
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Little and Big Endian Mystery
Cyclic Redundancy Check and Modulo-2 Division
Binary representation of a given number
Program to find whether a given number is power of 2
Josephus problem | Set 1 (A O(n) Solution)
Bit Fields in C
Set, Clear and Toggle a given bit of a number in C
Find the element that appears once
Bits manipulation (Important tactics)
C++ bitset and its application
|
[
{
"code": null,
"e": 26387,
"s": 26359,
"text": "\n10 May, 2022"
},
{
"code": null,
"e": 26612,
"s": 26387,
"text": "You are given two positive number n and m. You have to find simply addition of both number but with a given condition that there is not any carry system in this addition. That is no carry is added at higher MSBs.Examples : "
},
{
"code": null,
"e": 26687,
"s": 26612,
"text": "Input : m = 456, n = 854\nOutput : 200\n\nInput : m = 456, n = 4\nOutput : 450"
},
{
"code": null,
"e": 26701,
"s": 26687,
"text": "Algorithm : "
},
{
"code": null,
"e": 27061,
"s": 26701,
"text": "Input n, m while(n||m)\n {\n // Add each bits \n bit_sum = (n%10) + (m%10);\n\n // Neglect carry\n bit_sum %= 10;\n\n // Update result\n // multiplier to maintain place value\n res = (bit_sum * multiplier) + res;\n n /= 10;\n m /= 10;\n\n // Update multiplier\n multiplier *=10;\n } print res"
},
{
"code": null,
"e": 27694,
"s": 27063,
"text": "Approach : To solve this problem we will need the bit by bit addition of number where we start adding two number from right most bit (LSB) and add integers from both numbers with same position. Also we will neglect carry at each position so that carry will not affect further higher bit position. Start adding both numbers bit by bit and for each bit take sum of integers then neglect their carry by taking modulo of bit_sum by 10 further add bit_sum to res by multiplying bit_sum with a multiplier specifying place value. (Multiplier got incremented 10 times on each iteration.) Below is the implementation of above approach : "
},
{
"code": null,
"e": 27698,
"s": 27694,
"text": "C++"
},
{
"code": null,
"e": 27703,
"s": 27698,
"text": "Java"
},
{
"code": null,
"e": 27711,
"s": 27703,
"text": "Python3"
},
{
"code": null,
"e": 27714,
"s": 27711,
"text": "C#"
},
{
"code": null,
"e": 27718,
"s": 27714,
"text": "PHP"
},
{
"code": null,
"e": 27729,
"s": 27718,
"text": "Javascript"
},
{
"code": "// CPP program for special// addition of two number#include <bits/stdc++.h>using namespace std; int xSum(int n, int m){ // variable to store result int res = 0; // variable to maintain // place value int multiplier = 1; // variable to maintain // each digit sum int bit_sum; // Add numbers till each // number become zero while (n || m) { // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n /= 10; m /= 10; // Update multiplier multiplier *= 10; } return res;} // Driver programint main(){ int n = 8458; int m = 8732; cout << xSum(n, m); return 0;}",
"e": 28515,
"s": 27729,
"text": null
},
{
"code": "// Java program for special// addition of two numberimport java.util.*;import java.lang.*; public class GfG { public static int xSum(int n, int m) { int res = 0; int multiplier = 1; int bit_sum; // Add numbers till each // number become zero while (true) { if(n==0 && m==0) break; // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n /= 10; m /= 10; // Update multiplier multiplier *= 10; } return res; } // Driver function public static void main(String args[]) { int n = 8458; int m = 8732; System.out.println(xSum(n, m)); }}/* This code is contributed by Sagar Shukla */",
"e": 29436,
"s": 28515,
"text": null
},
{
"code": "# Python3 program for special# addition of two numberimport math def xSum(n, m) : # variable to # store result res = 0 # variable to maintain # place value multiplier = 1 # variable to maintain # each digit sum bit_sum = 0 # Add numbers till each # number become zero while (n or m) : # Add each bits bit_sum = ((n % 10) + (m % 10)) # Neglect carry bit_sum = bit_sum % 10 # Update result res = (bit_sum * multiplier) + res n = math.floor(n / 10) m = math.floor(m / 10) # Update multiplier multiplier = multiplier * 10 return res # Driver coden = 8458m = 8732print (xSum(n, m)) # This code is contributed by# Manish Shaw(manishshaw1)",
"e": 30247,
"s": 29436,
"text": null
},
{
"code": "// C# program for special// addition of two numberusing System; public class GfG { public static int xSum(int n, int m) { int res = 0; int multiplier = 1; int bit_sum; // Add numbers till each // number become zero while (true) { // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n /= 10; m /= 10; // Update multiplier multiplier *= 10; if (n == 0) break; if (m == 0) break; } return res; } // Driver function public static void Main() { int n = 8458; int m = 8732; Console.WriteLine(xSum(n, m)); }} /* This code is contributed by Vt_m */",
"e": 31136,
"s": 30247,
"text": null
},
{
"code": "<?php// php program for special// addition of two number function xSum($n, $m){ // variable to store result $res = 0; // variable to maintain // place value $multiplier = 1; // variable to maintain // each digit sum $bit_sum; // Add numbers till each // number become zero while ($n || $m) { // Add each bits $bit_sum = ($n % 10) + ($m % 10); // Neglect carry $bit_sum %= 10; // Update result $res = ($bit_sum * $multiplier) + $res; $n =floor($n / 10); $m =floor($m / 10); // Update multiplier $multiplier *= 10; } return $res;} // Driver code $n = 8458; $m = 8732; echo xSum($n, $m); //This code is contributed by mits?>",
"e": 31934,
"s": 31136,
"text": null
},
{
"code": "<script> // Javascript program for special// addition of two numberfunction xSum(n, m){ // variable to store result var res = 0; // variable to maintain // place value var multiplier = 1; // variable to maintain // each digit sum var bit_sum; // Add numbers till each // number become zero while (n || m) { // Add each bits bit_sum = (n % 10) + (m % 10); // Neglect carry bit_sum %= 10; // Update result res = (bit_sum * multiplier) + res; n = parseInt(n / 10); m = parseInt(m / 10); // Update multiplier multiplier *= 10; } return res;} // Driver programvar n = 8458;var m = 8732;document.write(xSum(n, m)); // This code is contributed by noob2000.</script>",
"e": 32741,
"s": 31934,
"text": null
},
{
"code": null,
"e": 32752,
"s": 32741,
"text": "Output : "
},
{
"code": null,
"e": 32757,
"s": 32752,
"text": "6180"
},
{
"code": null,
"e": 32772,
"s": 32759,
"text": "Mithun Kumar"
},
{
"code": null,
"e": 32784,
"s": 32772,
"text": "manishshaw1"
},
{
"code": null,
"e": 32792,
"s": 32784,
"text": "offbeat"
},
{
"code": null,
"e": 32801,
"s": 32792,
"text": "noob2000"
},
{
"code": null,
"e": 32814,
"s": 32801,
"text": "simmytarika5"
},
{
"code": null,
"e": 32829,
"s": 32814,
"text": "sagartomar9927"
},
{
"code": null,
"e": 32839,
"s": 32829,
"text": "Bit Magic"
},
{
"code": null,
"e": 32849,
"s": 32839,
"text": "Bit Magic"
},
{
"code": null,
"e": 32947,
"s": 32849,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 32977,
"s": 32947,
"text": "Little and Big Endian Mystery"
},
{
"code": null,
"e": 33023,
"s": 32977,
"text": "Cyclic Redundancy Check and Modulo-2 Division"
},
{
"code": null,
"e": 33063,
"s": 33023,
"text": "Binary representation of a given number"
},
{
"code": null,
"e": 33116,
"s": 33063,
"text": "Program to find whether a given number is power of 2"
},
{
"code": null,
"e": 33159,
"s": 33116,
"text": "Josephus problem | Set 1 (A O(n) Solution)"
},
{
"code": null,
"e": 33175,
"s": 33159,
"text": "Bit Fields in C"
},
{
"code": null,
"e": 33226,
"s": 33175,
"text": "Set, Clear and Toggle a given bit of a number in C"
},
{
"code": null,
"e": 33261,
"s": 33226,
"text": "Find the element that appears once"
},
{
"code": null,
"e": 33299,
"s": 33261,
"text": "Bits manipulation (Important tactics)"
}
] |
BitSet in Scala - GeeksforGeeks
|
04 Jul, 2019
A set is a collection which only contains unique items which are not repeatable. A BitSet is a collection of small integers as the bits of a larger integer. Non negative integers sets which represented as array of variable-size of bits packed into 64-bit words is called BitSets. The largest number stored in bitset is the memory of a bitset. It extends Set trait.
Syntax:
var BS : BitSet = BitSet(element1, element2, element3, ....)
Where BS is the name of created BitSet
In Scala, BitSet have two versions: scala.collection.immutable.BitSet and scala.collection.mutable.BitSet. They are almost identical but the mutable version changes the bits in place so immutable data structures are much better for concurrency.
Initialize a BitSet : Below is the example to create or initialize BitSet.
Example :
// Scala program to initialize a BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println("Initialize a BitSet") // Creating HashSet val bitSet: BitSet = BitSet(0, 1, 2, 3) println(s"Elements are = $bitSet") }}
Initialize a BitSet
Elements are = BitSet(0, 1, 2, 3)
Check specific elements in BitSet :
Example :
// Scala program of Check specific elements in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println("Initialize a BitSet") // Creating BitSet val bitSet: BitSet = BitSet(0, 1, 2, 3) println(s"Elements are = $bitSet") // Checking println(s"Element 2 = ${bitSet(2)}") println(s"Element 4 = ${bitSet(4)}") }}
Initialize a BitSet
Elements are = BitSet(0, 1, 2, 3)
Element 2 = true
Element 4 = false
Adding an elements in BitSet : We can add an element in BitSet by using + sign. below is the example of adding an element in BitSet.
Example :
// Scala program of adding an element in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println("Initialize a BitSet") // Creating BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s"Elements are = $bs") // Adding an element in BitSet val bs1: BitSet = bs + 10 + 11 println(s"Adding elements to BitSet = $bs1") }}
Initialize a BitSet
Elements are = BitSet(0, 1, 2, 3)
Adding elements to BitSet = BitSet(0, 1, 2, 3, 10, 11)
Adding more than one element in BitSet : We can add more than one element in BitSet by using ++ sign. below is the example of adding more than one elements in BitSet.
Example :
// Scala program of adding more elements in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println("Initialize a BitSet") // Creating BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s"Elements are = $bs") // Adding elements in BitSet val bs1: BitSet = bs ++ BitSet(4, 5, 6) println(s"Add more than one elements to BitSet = $bs1") }}
Initialize a BitSet
Elements are = BitSet(0, 1, 2, 3)
Add more than one elements to BitSet = BitSet(0, 1, 2, 3, 4, 5, 6)
Remove element in BitSet : We can remove an element in BitSet by using β sign. below is the example of removing an element in BitSet.
Example :
// Scala program of removing element in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println("Initialize a BitSet") // Creating BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s"Elements are = $bs") // removing elements in BitSet val bs1: BitSet = bs - 2 println(s"remove element from bitset = $bs1") }}
Initialize a BitSet
Elements are = BitSet(0, 1, 2, 3)
remove element from bitset = BitSet(0, 1, 3)
Find the intersection between two BitSets : We can find intersection between two BitSets by using & sign. below is the example of finding intersection between two BitSets.
Example :
// Scala program of finding the intersection between two BitSetsimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println("Initialize two BitSets") // Creating two BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s"Elements of bitset1 are = $bs") val bs1: BitSet = BitSet(4, 5, 3, 6) println(s"Elements of bitset2 are = $bs1") // finding the intersection between two BitSets println(s"Intersection of bitSet1 and bitSet2 = ${bs & bs1}") }}
Initialize two BitSets
Elements of bitset1 are = BitSet(0, 1, 2, 3)
Elements of bitset2 are = BitSet(3, 4, 5, 6)
Intersection of bitSet1 and bitSet2 = BitSet(3)
Initializing an empty BitSet :
Example :
// Scala program of Initializing an empty BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { // Initializing an empty BitSet val emptyBitSet: BitSet = BitSet.empty println(s"Empty BitSet = $emptyBitSet") }}
Empty BitSet = BitSet()
scala-collection
Scala
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
For Loop in Scala
Scala | flatMap Method
Scala | map() method
Scala List filter() method with example
Scala | reduce() Function
String concatenation in Scala
Type Casting in Scala
Scala Tutorial β Learn Scala with Step By Step Guide
Scala List contains() method with example
Scala String substring() method with example
|
[
{
"code": null,
"e": 26063,
"s": 26035,
"text": "\n04 Jul, 2019"
},
{
"code": null,
"e": 26428,
"s": 26063,
"text": "A set is a collection which only contains unique items which are not repeatable. A BitSet is a collection of small integers as the bits of a larger integer. Non negative integers sets which represented as array of variable-size of bits packed into 64-bit words is called BitSets. The largest number stored in bitset is the memory of a bitset. It extends Set trait."
},
{
"code": null,
"e": 26436,
"s": 26428,
"text": "Syntax:"
},
{
"code": null,
"e": 26539,
"s": 26436,
"text": "var BS : BitSet = BitSet(element1, element2, element3, ....) \n\nWhere BS is the name of created BitSet"
},
{
"code": null,
"e": 26784,
"s": 26539,
"text": "In Scala, BitSet have two versions: scala.collection.immutable.BitSet and scala.collection.mutable.BitSet. They are almost identical but the mutable version changes the bits in place so immutable data structures are much better for concurrency."
},
{
"code": null,
"e": 26859,
"s": 26784,
"text": "Initialize a BitSet : Below is the example to create or initialize BitSet."
},
{
"code": null,
"e": 26869,
"s": 26859,
"text": "Example :"
},
{
"code": "// Scala program to initialize a BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println(\"Initialize a BitSet\") // Creating HashSet val bitSet: BitSet = BitSet(0, 1, 2, 3) println(s\"Elements are = $bitSet\") }} ",
"e": 27208,
"s": 26869,
"text": null
},
{
"code": null,
"e": 27263,
"s": 27208,
"text": "Initialize a BitSet\nElements are = BitSet(0, 1, 2, 3)\n"
},
{
"code": null,
"e": 27300,
"s": 27263,
"text": " Check specific elements in BitSet :"
},
{
"code": null,
"e": 27310,
"s": 27300,
"text": "Example :"
},
{
"code": "// Scala program of Check specific elements in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println(\"Initialize a BitSet\") // Creating BitSet val bitSet: BitSet = BitSet(0, 1, 2, 3) println(s\"Elements are = $bitSet\") // Checking println(s\"Element 2 = ${bitSet(2)}\") println(s\"Element 4 = ${bitSet(4)}\") }} ",
"e": 27779,
"s": 27310,
"text": null
},
{
"code": null,
"e": 27869,
"s": 27779,
"text": "Initialize a BitSet\nElements are = BitSet(0, 1, 2, 3)\nElement 2 = true\nElement 4 = false\n"
},
{
"code": null,
"e": 28003,
"s": 27869,
"text": " Adding an elements in BitSet : We can add an element in BitSet by using + sign. below is the example of adding an element in BitSet."
},
{
"code": null,
"e": 28013,
"s": 28003,
"text": "Example :"
},
{
"code": "// Scala program of adding an element in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println(\"Initialize a BitSet\") // Creating BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s\"Elements are = $bs\") // Adding an element in BitSet val bs1: BitSet = bs + 10 + 11 println(s\"Adding elements to BitSet = $bs1\") }}",
"e": 28488,
"s": 28013,
"text": null
},
{
"code": null,
"e": 28598,
"s": 28488,
"text": "Initialize a BitSet\nElements are = BitSet(0, 1, 2, 3)\nAdding elements to BitSet = BitSet(0, 1, 2, 3, 10, 11)\n"
},
{
"code": null,
"e": 28766,
"s": 28598,
"text": " Adding more than one element in BitSet : We can add more than one element in BitSet by using ++ sign. below is the example of adding more than one elements in BitSet."
},
{
"code": null,
"e": 28776,
"s": 28766,
"text": "Example :"
},
{
"code": "// Scala program of adding more elements in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println(\"Initialize a BitSet\") // Creating BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s\"Elements are = $bs\") // Adding elements in BitSet val bs1: BitSet = bs ++ BitSet(4, 5, 6) println(s\"Add more than one elements to BitSet = $bs1\") }}",
"e": 29272,
"s": 28776,
"text": null
},
{
"code": null,
"e": 29395,
"s": 29272,
"text": "Initialize a BitSet\nElements are = BitSet(0, 1, 2, 3)\nAdd more than one elements to BitSet = BitSet(0, 1, 2, 3, 4, 5, 6)\n"
},
{
"code": null,
"e": 29530,
"s": 29395,
"text": " Remove element in BitSet : We can remove an element in BitSet by using β sign. below is the example of removing an element in BitSet."
},
{
"code": null,
"e": 29540,
"s": 29530,
"text": "Example :"
},
{
"code": "// Scala program of removing element in BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println(\"Initialize a BitSet\") // Creating BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s\"Elements are = $bs\") // removing elements in BitSet val bs1: BitSet = bs - 2 println(s\"remove element from bitset = $bs1\") }}",
"e": 30009,
"s": 29540,
"text": null
},
{
"code": null,
"e": 30109,
"s": 30009,
"text": "Initialize a BitSet\nElements are = BitSet(0, 1, 2, 3)\nremove element from bitset = BitSet(0, 1, 3)\n"
},
{
"code": null,
"e": 30282,
"s": 30109,
"text": " Find the intersection between two BitSets : We can find intersection between two BitSets by using & sign. below is the example of finding intersection between two BitSets."
},
{
"code": null,
"e": 30292,
"s": 30282,
"text": "Example :"
},
{
"code": "// Scala program of finding the intersection between two BitSetsimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { println(\"Initialize two BitSets\") // Creating two BitSet val bs: BitSet = BitSet(0, 1, 2, 3) println(s\"Elements of bitset1 are = $bs\") val bs1: BitSet = BitSet(4, 5, 3, 6) println(s\"Elements of bitset2 are = $bs1\") // finding the intersection between two BitSets println(s\"Intersection of bitSet1 and bitSet2 = ${bs & bs1}\") }}",
"e": 30902,
"s": 30292,
"text": null
},
{
"code": null,
"e": 31064,
"s": 30902,
"text": "Initialize two BitSets\nElements of bitset1 are = BitSet(0, 1, 2, 3)\nElements of bitset2 are = BitSet(3, 4, 5, 6)\nIntersection of bitSet1 and bitSet2 = BitSet(3)\n"
},
{
"code": null,
"e": 31096,
"s": 31064,
"text": " Initializing an empty BitSet :"
},
{
"code": null,
"e": 31106,
"s": 31096,
"text": "Example :"
},
{
"code": "// Scala program of Initializing an empty BitSetimport scala.collection.immutable.BitSet // Creating objectobject GFG{ // Main method def main(args:Array[String]) { // Initializing an empty BitSet val emptyBitSet: BitSet = BitSet.empty println(s\"Empty BitSet = $emptyBitSet\") }}",
"e": 31421,
"s": 31106,
"text": null
},
{
"code": null,
"e": 31446,
"s": 31421,
"text": "Empty BitSet = BitSet()\n"
},
{
"code": null,
"e": 31463,
"s": 31446,
"text": "scala-collection"
},
{
"code": null,
"e": 31469,
"s": 31463,
"text": "Scala"
},
{
"code": null,
"e": 31567,
"s": 31469,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 31585,
"s": 31567,
"text": "For Loop in Scala"
},
{
"code": null,
"e": 31608,
"s": 31585,
"text": "Scala | flatMap Method"
},
{
"code": null,
"e": 31629,
"s": 31608,
"text": "Scala | map() method"
},
{
"code": null,
"e": 31669,
"s": 31629,
"text": "Scala List filter() method with example"
},
{
"code": null,
"e": 31695,
"s": 31669,
"text": "Scala | reduce() Function"
},
{
"code": null,
"e": 31725,
"s": 31695,
"text": "String concatenation in Scala"
},
{
"code": null,
"e": 31747,
"s": 31725,
"text": "Type Casting in Scala"
},
{
"code": null,
"e": 31800,
"s": 31747,
"text": "Scala Tutorial β Learn Scala with Step By Step Guide"
},
{
"code": null,
"e": 31842,
"s": 31800,
"text": "Scala List contains() method with example"
}
] |
Python | Different ways to kill a Thread - GeeksforGeeks
|
20 Jul, 2021
In general, killing threads abruptly is considered a bad programming practice. Killing a thread abruptly might leave a critical resource that must be closed properly, open. But you might want to kill a thread once some specific time period has passed or some interrupt has been generated. There are the various methods by which you can kill a thread in python.
Raising exceptions in a python thread
Set/Reset stop flag
Using traces to kill threads
Using the multiprocessing module to kill threads
Killing Python thread by setting it as daemon
Using a hidden function _stop()
Raising exceptions in a python thread : This method uses the function PyThreadState_SetAsyncExc() to raise an exception in the a thread. For Example,
Python3
# Python program raising# exceptions in a python# thread import threadingimport ctypesimport time class thread_with_exception(threading.Thread): def __init__(self, name): threading.Thread.__init__(self) self.name = name def run(self): # target function of the thread class try: while True: print('running ' + self.name) finally: print('ended') def get_id(self): # returns id of the respective thread if hasattr(self, '_thread_id'): return self._thread_id for id, thread in threading._active.items(): if thread is self: return id def raise_exception(self): thread_id = self.get_id() res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit)) if res > 1: ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0) print('Exception raise failure') t1 = thread_with_exception('Thread 1')t1.start()time.sleep(2)t1.raise_exception()t1.join()
When we run the code above in a machine and you will notice, as soon as the function raise_exception() is called, the target function run() ends. This is because as soon as an exception is raised, program control jumps out of the try block and run() function is terminated. After that join() function can be called to kill the thread. In the absence of the function run_exception(), the target function run() keeps running forever and join() function is never called to kill the thread. Set/Reset stop flag : In order to kill a threads, we can declare a stop flag and this flag will be check occasionally by the thread. For Example
Python3
# Python program showing# how to kill threads# using set/reset stop# flag import threadingimport time def run(): while True: print('thread running') global stop_threads if stop_threads: break stop_threads = Falset1 = threading.Thread(target = run)t1.start()time.sleep(1)stop_threads = Truet1.join()print('thread killed')
In the above code, as soon as the global variable stop_threads is set, the target function run() ends and the thread t1 can be killed by using t1.join(). But one may refrain from using global variable due to certain reasons. For those situations, function objects can be passed to provide a similar functionality as shown below.
Python3
# Python program killing# threads using stop# flag import threadingimport time def run(stop): while True: print('thread running') if stop(): break def main(): stop_threads = False t1 = threading.Thread(target = run, args =(lambda : stop_threads, )) t1.start() time.sleep(1) stop_threads = True t1.join() print('thread killed')main()
The function object passed in the above code always returns the value of the local variable stop_threads. This value is checked in the function run(), and as soon as stop_threads is reset, the run() function ends and the thread can be killed. Using traces to kill threads : This method works by installing traces in each thread. Each trace terminates itself on the detection of some stimulus or flag, thus instantly killing the associated thread. For Example
Python3
# Python program using# traces to kill threads import sysimport traceimport threadingimport timeclass thread_with_trace(threading.Thread): def __init__(self, *args, **keywords): threading.Thread.__init__(self, *args, **keywords) self.killed = False def start(self): self.__run_backup = self.run self.run = self.__run threading.Thread.start(self) def __run(self): sys.settrace(self.globaltrace) self.__run_backup() self.run = self.__run_backup def globaltrace(self, frame, event, arg): if event == 'call': return self.localtrace else: return None def localtrace(self, frame, event, arg): if self.killed: if event == 'line': raise SystemExit() return self.localtrace def kill(self): self.killed = True def func(): while True: print('thread running') t1 = thread_with_trace(target = func)t1.start()time.sleep(2)t1.kill()t1.join()if not t1.isAlive(): print('thread killed')
In this code, start() is slightly modified to set the system trace function using settrace(). The local trace function is defined such that, whenever the kill flag (killed) of the respective thread is set, a SystemExit exception is raised upon the execution of the next line of code, which ends the execution of the target function func. Now the thread can be killed with join(). Using the multiprocessing module to kill threads : The multiprocessing module of Python allows you to spawn processes in a similar way to how you spawn threads using the threading module. The interface of the multiprocessing module is similar to that of the threading module. For Example, in the given code we created three threads (processes) which count from 1 to 9.
Python3
# Python program creating# three threads import threadingimport time # counts from 1 to 9def func(number): for i in range(1, 10): time.sleep(0.01) print('Thread ' + str(number) + ': prints ' + str(number*i)) # creates 3 threadsfor i in range(0, 3): thread = threading.Thread(target=func, args=(i,)) thread.start()
The functionality of the above code can also be implemented by using the multiprocessing module in a similar manner, with very few changes. See the code given below.
Python3
# Python program creating# thread using multiprocessing# module import multiprocessingimport time def func(number): for i in range(1, 10): time.sleep(0.01) print('Processing ' + str(number) + ': prints ' + str(number*i)) for i in range(0, 3): process = multiprocessing.Process(target=func, args=(i,)) process.start()
Though the interface of the two modules is similar, the two modules have very different implementations. All the threads share global variables, whereas processes are completely separate from each other. Hence, killing processes is much safer as compared to killing threads. The Process class is provided a method, terminate(), to kill a process. Now, getting back to the initial problem. Suppose in the above code, we want to kill all the processes after 0.03s have passed. This functionality is achieved using the multiprocessing module in the following code.
Python3
# Python program killing# a thread using multiprocessing# module import multiprocessingimport time def func(number): for i in range(1, 10): time.sleep(0.01) print('Processing ' + str(number) + ': prints ' + str(number*i)) # list of all processes, so that they can be killed afterwardsall_processes = [] for i in range(0, 3): process = multiprocessing.Process(target=func, args=(i,)) process.start() all_processes.append(process) # kill all processes after 0.03stime.sleep(0.03)for process in all_processes: process.terminate()
Though the two modules have different implementations, the functionality provided by the multiprocessing module in the above code is similar to killing threads. Hence, the multiprocessing module can be used as a simple alternative whenever we are required to implement the killing of threads in Python. Killing a Python thread by setting it as daemon : Daemon threads are those threads which are killed when the main program exits. For Example
Python3
import threadingimport timeimport sys def func(): while True: time.sleep(0.5) print("Thread alive, and it won't die on program termination") t1 = threading.Thread(target=func)t1.start()time.sleep(2)sys.exit()
Notice that thread t1 stays alive and prevents the main program from exiting via sys.exit(). In Python, any alive non-daemon thread blocks the main program from exiting. Daemon threads, on the other hand, are themselves killed as soon as the main program exits. In other words, as soon as the main program exits, all the daemon threads are killed. To declare a thread as daemon, we set the keyword argument daemon to True. For Example, the given code demonstrates the property of daemon threads.
Python3
# Python program killing# thread using daemon import threadingimport timeimport sys def func(): while True: time.sleep(0.5) print('Thread alive, but it will die on program termination') t1 = threading.Thread(target=func)t1.daemon = Truet1.start()time.sleep(2)sys.exit()
Notice that, as soon as the main program exits, the thread t1 is killed. This method proves to be extremely useful in cases where program termination can be used to trigger the killing of threads. Note that in Python, the main program terminates as soon as all the non-daemon threads are dead, irrespective of the number of daemon threads alive. Hence, the resources held by these daemon threads, such as open files, database transactions, etc., may not be released properly. The initial thread of control in a Python program is not a daemon thread. Killing a thread forcibly is not recommended unless it is known for sure that doing so will not cause any leaks or deadlocks. Using a hidden function _stop() : In order to kill a thread, we use the hidden function _stop(). This function is not documented and might disappear in a future version of Python.
Python3
# Python program killing# a thread using ._stop()# function import timeimport threading class MyThread(threading.Thread): # Thread class with a _stop() method. # The thread itself has to check # regularly for the stopped() condition. def __init__(self, *args, **kwargs): super(MyThread, self).__init__(*args, **kwargs) self._stop = threading.Event() # function using _stop function def stop(self): self._stop.set() def stopped(self): return self._stop.isSet() def run(self): while True: if self.stopped(): return print("Hello, world!") time.sleep(1) t1 = MyThread() t1.start()time.sleep(5)t1.stop()t1.join()
Note: The above methods might not work in some situations, because Python does not provide any direct method to kill threads.
nandavardhanthupalli
anikakapoor
Picked
Python-multithreading
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Python Dictionary
Read a file line by line in Python
How to Install PIP on Windows ?
Enumerate() in Python
Different ways to create Pandas Dataframe
Iterate over a list in Python
Python String | replace()
*args and **kwargs in Python
Reading and Writing to text files in Python
Create a Pandas DataFrame from Lists
|
[
{
"code": null,
"e": 25977,
"s": 25949,
"text": "\n20 Jul, 2021"
},
{
"code": null,
"e": 26340,
"s": 25977,
"text": "In general, killing threads abruptly is considered a bad programming practice. Killing a thread abruptly might leave a critical resource that must be closed properly, open. But you might want to kill a thread once some specific time period has passed or some interrupt has been generated. There are the various methods by which you can kill a thread in python. "
},
{
"code": null,
"e": 26378,
"s": 26340,
"text": "Raising exceptions in a python thread"
},
{
"code": null,
"e": 26398,
"s": 26378,
"text": "Set/Reset stop flag"
},
{
"code": null,
"e": 26427,
"s": 26398,
"text": "Using traces to kill threads"
},
{
"code": null,
"e": 26476,
"s": 26427,
"text": "Using the multiprocessing module to kill threads"
},
{
"code": null,
"e": 26522,
"s": 26476,
"text": "Killing Python thread by setting it as daemon"
},
{
"code": null,
"e": 26554,
"s": 26522,
"text": "Using a hidden function _stop()"
},
{
"code": null,
"e": 26706,
"s": 26554,
"text": "Raising exceptions in a python thread : This method uses the function PyThreadState_SetAsyncExc() to raise an exception in the a thread. For Example, "
},
{
"code": null,
"e": 26714,
"s": 26706,
"text": "Python3"
},
{
"code": "# Python program raising# exceptions in a python# thread import threadingimport ctypesimport time class thread_with_exception(threading.Thread): def __init__(self, name): threading.Thread.__init__(self) self.name = name def run(self): # target function of the thread class try: while True: print('running ' + self.name) finally: print('ended') def get_id(self): # returns id of the respective thread if hasattr(self, '_thread_id'): return self._thread_id for id, thread in threading._active.items(): if thread is self: return id def raise_exception(self): thread_id = self.get_id() res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit)) if res > 1: ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0) print('Exception raise failure') t1 = thread_with_exception('Thread 1')t1.start()time.sleep(2)t1.raise_exception()t1.join()",
"e": 27810,
"s": 26714,
"text": null
},
{
"code": null,
"e": 28446,
"s": 27810,
"text": "When we run the code above in a machine and you will notice, as soon as the function raise_exception() is called, the target function run() ends. This is because as soon as an exception is raised, program control jumps out of the try block and run() function is terminated. After that join() function can be called to kill the thread. In the absence of the function run_exception(), the target function run() keeps running forever and join() function is never called to kill the thread. Set/Reset stop flag : In order to kill a threads, we can declare a stop flag and this flag will be check occasionally by the thread. For Example "
},
{
"code": null,
"e": 28454,
"s": 28446,
"text": "Python3"
},
{
"code": "# Python program showing# how to kill threads# using set/reset stop# flag import threadingimport time def run(): while True: print('thread running') global stop_threads if stop_threads: break stop_threads = Falset1 = threading.Thread(target = run)t1.start()time.sleep(1)stop_threads = Truet1.join()print('thread killed')",
"e": 28810,
"s": 28454,
"text": null
},
{
"code": null,
"e": 29141,
"s": 28810,
"text": "In the above code, as soon as the global variable stop_threads is set, the target function run() ends and the thread t1 can be killed by using t1.join(). But one may refrain from using global variable due to certain reasons. For those situations, function objects can be passed to provide a similar functionality as shown below. "
},
{
"code": null,
"e": 29149,
"s": 29141,
"text": "Python3"
},
{
"code": "# Python program killing# threads using stop# flag import threadingimport time def run(stop): while True: print('thread running') if stop(): break def main(): stop_threads = False t1 = threading.Thread(target = run, args =(lambda : stop_threads, )) t1.start() time.sleep(1) stop_threads = True t1.join() print('thread killed')main()",
"e": 29579,
"s": 29149,
"text": null
},
{
"code": null,
"e": 30043,
"s": 29579,
"text": "The function object passed in the above code always returns the value of the local variable stop_threads. This value is checked in the function run(), and as soon as stop_threads is reset, the run() function ends and the thread can be killed. Using traces to kill threads : This methods works by installing traces in each thread. Each trace terminates itself on the detection of some stimulus or flag, thus instantly killing the associated thread. For Example "
},
{
"code": null,
"e": 30051,
"s": 30043,
"text": "Python3"
},
{
"code": "# Python program using# traces to kill threads import sysimport traceimport threadingimport timeclass thread_with_trace(threading.Thread): def __init__(self, *args, **keywords): threading.Thread.__init__(self, *args, **keywords) self.killed = False def start(self): self.__run_backup = self.run self.run = self.__run threading.Thread.start(self) def __run(self): sys.settrace(self.globaltrace) self.__run_backup() self.run = self.__run_backup def globaltrace(self, frame, event, arg): if event == 'call': return self.localtrace else: return None def localtrace(self, frame, event, arg): if self.killed: if event == 'line': raise SystemExit() return self.localtrace def kill(self): self.killed = True def func(): while True: print('thread running') t1 = thread_with_trace(target = func)t1.start()time.sleep(2)t1.kill()t1.join()if not t1.isAlive(): print('thread killed')",
"e": 31003,
"s": 30051,
"text": null
},
{
"code": null,
"e": 31746,
"s": 31003,
"text": "In this code, start() is slightly modified to set the system trace function using settrace(). The local trace function is defined such that, whenever the kill flag (killed) of the respective thread is set, a SystemExit exception is raised upon the execution of the next line of code, which end the execution of the target function func. Now the thread can be killed with join(). Using the multiprocessing module to kill threads : The multiprocessing module of Python allows you to spawn processes in the similar way you spawn threads using the threading module. The interface of the multithreading module is similar to that of the threading module. For Example, in a given code we created three threads(processes) which count from 1 to 9. "
},
{
"code": null,
"e": 31754,
"s": 31746,
"text": "Python3"
},
{
"code": "# Python program creating# three threads import threadingimport time # counts from 1 to 9def func(number): for i in range(1, 10): time.sleep(0.01) print('Thread ' + str(number) + ': prints ' + str(number*i)) # creates 3 threadsfor i in range(0, 3): thread = threading.Thread(target=func, args=(i,)) thread.start()",
"e": 32091,
"s": 31754,
"text": null
},
{
"code": null,
"e": 32259,
"s": 32091,
"text": "The functionality of the above code can also be implemented by using the multiprocessing module in a similar manner, with very few changes. See the code given below. "
},
{
"code": null,
"e": 32267,
"s": 32259,
"text": "Python3"
},
{
"code": "# Python program creating# thread using multiprocessing# module import multiprocessingimport time def func(number): for i in range(1, 10): time.sleep(0.01) print('Processing ' + str(number) + ': prints ' + str(number*i)) for i in range(0, 3): process = multiprocessing.Process(target=func, args=(i,)) process.start()",
"e": 32607,
"s": 32267,
"text": null
},
{
"code": null,
"e": 33171,
"s": 32607,
"text": "Though the interface of the two modules is similar, the two modules have very different implementations. All the threads share global variables, whereas processes are completely separate from each other. Hence, killing processes is much safer as compared to killing threads. The Process class is provided a method, terminate(), to kill a process. Now, getting back to the initial problem. Suppose in the above code, we want to kill all the processes after 0.03s have passed. This functionality is achieved using the multiprocessing module in the following code. "
},
{
"code": null,
"e": 33179,
"s": 33171,
"text": "Python3"
},
{
"code": "# Python program killing# a thread using multiprocessing# module import multiprocessingimport time def func(number): for i in range(1, 10): time.sleep(0.01) print('Processing ' + str(number) + ': prints ' + str(number*i)) # list of all processes, so that they can be killed afterwardsall_processes = [] for i in range(0, 3): process = multiprocessing.Process(target=func, args=(i,)) process.start() all_processes.append(process) # kill all processes after 0.03stime.sleep(0.03)for process in all_processes: process.terminate()",
"e": 33735,
"s": 33179,
"text": null
},
{
"code": null,
"e": 34182,
"s": 33735,
"text": "Though the two modules have different implementations. This functionality provided by the multiprocessing module in the above code is similar to killing threads. Hence, the multiprocessing module can be used as a simple alternative whenever we are required to implement the killing of threads in Python. Killing Python thread by setting it as daemon : Daemon threads are those threads which are killed when the main program exits. For Example "
},
{
"code": null,
"e": 34190,
"s": 34182,
"text": "Python3"
},
{
"code": "import threadingimport timeimport sys def func(): while True: time.sleep(0.5) print(\"Thread alive, and it won't die on program termination\") t1 = threading.Thread(target=func)t1.start()time.sleep(2)sys.exit()",
"e": 34416,
"s": 34190,
"text": null
},
{
"code": null,
"e": 34900,
"s": 34416,
"text": "Notice that, thread t1 stays alive and prevents the main program to exit via sys.exit(). In Python, any alive non-daemon thread blocks the main program to exit. Whereas, daemon threads themselves are killed as soon as the main program exits. In other words, as soon as the main program exits, all the daemon threads are killed. To declare a thread as daemon, we set the keyword argument, daemon as True. For Example in the given code it demonstrates the property of daemon threads. "
},
{
"code": null,
"e": 34908,
"s": 34900,
"text": "Python3"
},
{
"code": "# Python program killing# thread using daemon import threadingimport timeimport sys def func(): while True: time.sleep(0.5) print('Thread alive, but it will die on program termination') t1 = threading.Thread(target=func)t1.daemon = Truet1.start()time.sleep(2)sys.exit()",
"e": 35195,
"s": 34908,
"text": null
},
{
"code": null,
"e": 36048,
"s": 35195,
"text": "Notice that, as soon as the main program exits, the thread t1 is killed. This method proves to be extremely useful in cases where program termination can be used to trigger the killing of threads. Note that in Python, the main program terminates as soon as all the non-daemon threads are dead, irrespective of the number of daemon threads alive. Hence, the resources held by these daemon threads, such as open files, database transactions, etc. may not be released properly. The initial thread of control in a python program is not a daemon thread. Killing a thread forcibly is not recommended unless it is known for sure, that doing so will not cause any leaks or deadlocks. Using a hidden function _stop() : In order to kill a thread, we use hidden function _stop() this function is not documented but might disappear in the next version of python. "
},
{
"code": null,
"e": 36056,
"s": 36048,
"text": "Python3"
},
{
"code": "# Python program killing# a thread using ._stop()# function import timeimport threading class MyThread(threading.Thread): # Thread class with a _stop() method. # The thread itself has to check # regularly for the stopped() condition. def __init__(self, *args, **kwargs): super(MyThread, self).__init__(*args, **kwargs) self._stop = threading.Event() # function using _stop function def stop(self): self._stop.set() def stopped(self): return self._stop.isSet() def run(self): while True: if self.stopped(): return print(\"Hello, world!\") time.sleep(1) t1 = MyThread() t1.start()time.sleep(5)t1.stop()t1.join()",
"e": 36776,
"s": 36056,
"text": null
},
{
"code": null,
"e": 36909,
"s": 36776,
"text": "Note: Above methods might not work in some situation or another, because python does not provide any direct method to kill threads. "
},
{
"code": null,
"e": 36930,
"s": 36909,
"text": "nandavardhanthupalli"
},
{
"code": null,
"e": 36942,
"s": 36930,
"text": "anikakapoor"
},
{
"code": null,
"e": 36949,
"s": 36942,
"text": "Picked"
},
{
"code": null,
"e": 36971,
"s": 36949,
"text": "Python-multithreading"
},
{
"code": null,
"e": 36978,
"s": 36971,
"text": "Python"
},
{
"code": null,
"e": 37076,
"s": 36978,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 37094,
"s": 37076,
"text": "Python Dictionary"
},
{
"code": null,
"e": 37129,
"s": 37094,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 37161,
"s": 37129,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 37183,
"s": 37161,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 37225,
"s": 37183,
"text": "Different ways to create Pandas Dataframe"
},
{
"code": null,
"e": 37255,
"s": 37225,
"text": "Iterate over a list in Python"
},
{
"code": null,
"e": 37281,
"s": 37255,
"text": "Python String | replace()"
},
{
"code": null,
"e": 37310,
"s": 37281,
"text": "*args and **kwargs in Python"
},
{
"code": null,
"e": 37354,
"s": 37310,
"text": "Reading and Writing to text files in Python"
}
] |
AngularJS | ng-src Directive - GeeksforGeeks
|
28 Mar, 2019
The ng-src Directive in AngularJS is used to specify the src attribute of an <img> element. It ensures that the wrong image is not produced until AngularJS has been evaluated. It is supported by <img> element.
Syntax:
<img ng-src="url"> </img>
Example:
<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>ng-src Directive</title> <script src= "https://ajax.googleapis.com/ajax/libs/angularjs/1.4.2/angular.min.js"> </script> </head> <body ng-app="app" style="text-align:center"> <h1 style="color:green">GeeksforGeeks</h1> <h2>ng-src Directive</h2> <div ng-controller="geek"> <img ng-src="{{pic}}" /><br><br><br> <script> var app = angular.module("app", []); app.controller('geek', ['$scope', function ($scope) { $scope.pic = "https://media.geeksforgeeks.org/wp-content/uploads/20190328034223/ngimg1.png"; }]); </script> </body></html>
Output:
AngularJS-Directives
AngularJS
Web Technologies
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Angular PrimeNG Dropdown Component
Angular PrimeNG Calendar Component
Angular 10 (blur) Event
Angular PrimeNG Messages Component
How to make a Bootstrap Modal Popup in Angular 9/8 ?
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
Top 10 Projects For Beginners To Practice HTML and CSS Skills
|
[
{
"code": null,
"e": 26354,
"s": 26326,
"text": "\n28 Mar, 2019"
},
{
"code": null,
"e": 26564,
"s": 26354,
"text": "The ng-src Directive in AngularJS is used to specify the src attribute of an <img> element. It ensures that the wrong image is not produced until AngularJS has been evaluated. It is supported by <img> element."
},
{
"code": null,
"e": 26572,
"s": 26564,
"text": "Syntax:"
},
{
"code": null,
"e": 26600,
"s": 26572,
"text": "<img ng-src=\"url\"> </img> \n"
},
{
"code": null,
"e": 26609,
"s": 26600,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html xmlns=\"http://www.w3.org/1999/xhtml\"> <head> <title>ng-src Directive</title> <script src= \"https://ajax.googleapis.com/ajax/libs/angularjs/1.4.2/angular.min.js\"> </script> </head> <body ng-app=\"app\" style=\"text-align:center\"> <h1 style=\"color:green\">GeeksforGeeks</h1> <h2>ng-src Directive</h2> <div ng-controller=\"geek\"> <img ng-src=\"{{pic}}\" /><br><br><br> <script> var app = angular.module(\"app\", []); app.controller('geek', ['$scope', function ($scope) { $scope.pic = \"https://media.geeksforgeeks.org/wp-content/uploads/20190328034223/ngimg1.png\"; }]); </script> </body></html>",
"e": 27346,
"s": 26609,
"text": null
},
{
"code": null,
"e": 27354,
"s": 27346,
"text": "Output:"
},
{
"code": null,
"e": 27375,
"s": 27354,
"text": "AngularJS-Directives"
},
{
"code": null,
"e": 27385,
"s": 27375,
"text": "AngularJS"
},
{
"code": null,
"e": 27402,
"s": 27385,
"text": "Web Technologies"
},
{
"code": null,
"e": 27500,
"s": 27402,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27535,
"s": 27500,
"text": "Angular PrimeNG Dropdown Component"
},
{
"code": null,
"e": 27570,
"s": 27535,
"text": "Angular PrimeNG Calendar Component"
},
{
"code": null,
"e": 27594,
"s": 27570,
"text": "Angular 10 (blur) Event"
},
{
"code": null,
"e": 27629,
"s": 27594,
"text": "Angular PrimeNG Messages Component"
},
{
"code": null,
"e": 27682,
"s": 27629,
"text": "How to make a Bootstrap Modal Popup in Angular 9/8 ?"
},
{
"code": null,
"e": 27722,
"s": 27682,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 27755,
"s": 27722,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 27800,
"s": 27755,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 27843,
"s": 27800,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
Filtering row which contains a certain string using Dplyr in R - GeeksforGeeks
|
28 Jul, 2021
In this article, we will learn how to filter rows that contain a certain string using dplyr package in R programming language.
Two main functions which will be used to carry out this task are:
filter(): the dplyr package's filter function will be used for filtering rows based on a condition
Syntax: filter(df , condition)
Parameter :
df: The data frame object
condition: The condition to filter the data upon
grepl(): the grepl() function is used to return the value TRUE if the specified string pattern is found in the vector and FALSE if it is not found.
Syntax: grepl(pattern, string, ignore.case=FALSE)
Parameters:
pattern: regular expressions pattern
string: character vector to be searched
ignore.case: whether to ignore case in the search. Here ignore.case is an optional parameter as is set to FALSE by default.
Here we have to pass the string to be searched in the grepl() function and the column to search in, this function returns true or false according to which filter() function prints the rows.
Syntax: df %>% filter(grepl('Pattern', column_name))
Parameters:
df: Dataframe object
grepl(): finds the pattern String
βPatternβ: pattern(string) to be found
column_name: pattern(string) will be searched in this column
Example:
R
library(dplyr)df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(grepl('Dev', roles))
Output:
marks age roles
1 30.2 22 Software Dev
2 60.5 25 FrontEnd Dev
Note that the only difference in this code from the above approach is that here we are using the '!' (not) operator. This operator inverts the output provided by the grepl() function by converting TRUE to FALSE and vice versa; as a result, this only prints the rows which do not contain the pattern and filters out the rows containing the pattern.
Syntax: df %>% filter(!grepl('Pattern', column_name))
Parameters:
df: Dataframe object
grepl(): finds the pattern String
βPatternβ: pattern(string) to be found
column_name: pattern(string) will be searched in this column
Example:
R
library(dplyr) df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(!grepl('Eng.', roles))
Output:
marks age roles
1 30.2 22 Software Dev
2 40.3 23 Data Analyst
3 60.5 25 FrontEnd Dev
This code is also similar to the above approaches; the only difference is that while passing multiple patterns (strings) to the grepl() function, the patterns are separated with the OR ('|') operator. This prints all the rows containing the specified patterns.
Syntax:
df %>% filter(grepl('Patt.1|Patt.2', column_name))
Example:
R
library(dplyr) df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(grepl('Dev|Eng.', roles))
Output:
marks age roles
1 20.1 21 Software Eng.
2 30.2 22 Software Dev
3 50.4 24 Data Eng.
4 60.5 25 FrontEnd Dev
This code is similar to the above approach; the only difference is that we are using the '!' (not) operator. This operator inverts the output provided by the grepl() function by converting TRUE to FALSE and vice versa; as a result, this only prints the rows which do not contain the specified patterns and filters out the rows containing the patterns.
Syntax:
df %>% filter(!grepl('Patt.1|Patt.2', column_name))
Example:
R
library(dplyr) df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(!grepl('Data|Front', roles))
Output:
marks age roles
1 20.1 21 Software Eng.
2 30.2 22 Software Dev
Picked
R Dplyr
R Language
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Change Color of Bars in Barchart using ggplot2 in R
Group by function in R using Dplyr
How to Change Axis Scales in R Plots?
How to Split Column Into Multiple Columns in R DataFrame?
Replace Specific Characters in String in R
How to filter R DataFrame by values in a column?
How to import an Excel File into R ?
R - if statement
Time Series Analysis in R
How to filter R dataframe by multiple conditions?
|
[
{
"code": null,
"e": 26487,
"s": 26459,
"text": "\n28 Jul, 2021"
},
{
"code": null,
"e": 26614,
"s": 26487,
"text": "In this article, we will learn how to filter rows that contain a certain string using dplyr package in R programming language."
},
{
"code": null,
"e": 26680,
"s": 26614,
"text": "Two main functions which will be used to carry out this task are:"
},
{
"code": null,
"e": 26773,
"s": 26680,
"text": "filter(): dplyr packageβs filter function will be used for filtering rows based on condition"
},
{
"code": null,
"e": 26804,
"s": 26773,
"text": "Syntax: filter(df , condition)"
},
{
"code": null,
"e": 26816,
"s": 26804,
"text": "Parameter :"
},
{
"code": null,
"e": 26843,
"s": 26816,
"text": "df: The data frame object"
},
{
"code": null,
"e": 26893,
"s": 26843,
"text": "condition: The condition to filter the data upon"
},
{
"code": null,
"e": 27042,
"s": 26893,
"text": "grepl(): grepl() function will is used to return the value TRUE if the specified string pattern is found in the vector and FALSE if it is not found."
},
{
"code": null,
"e": 27092,
"s": 27042,
"text": "Syntax: grepl(pattern, string, ignore.case=FALSE)"
},
{
"code": null,
"e": 27104,
"s": 27092,
"text": "Parameters:"
},
{
"code": null,
"e": 27141,
"s": 27104,
"text": "pattern: regular expressions pattern"
},
{
"code": null,
"e": 27181,
"s": 27141,
"text": "string: character vector to be searched"
},
{
"code": null,
"e": 27305,
"s": 27181,
"text": "ignore.case: whether to ignore case in the search. Here ignore.case is an optional parameter as is set to FALSE by default."
},
{
"code": null,
"e": 27495,
"s": 27305,
"text": "Here we have to pass the string to be searched in the grepl() function and the column to search in, this function returns true or false according to which filter() function prints the rows."
},
{
"code": null,
"e": 27548,
"s": 27495,
"text": "Syntax: df %>% filter(grepl(βPatternβ, column_name))"
},
{
"code": null,
"e": 27560,
"s": 27548,
"text": "Parameters:"
},
{
"code": null,
"e": 27581,
"s": 27560,
"text": "df: Dataframe object"
},
{
"code": null,
"e": 27615,
"s": 27581,
"text": "grepl(): finds the pattern String"
},
{
"code": null,
"e": 27654,
"s": 27615,
"text": "βPatternβ: pattern(string) to be found"
},
{
"code": null,
"e": 27715,
"s": 27654,
"text": "column_name: pattern(string) will be searched in this column"
},
{
"code": null,
"e": 27725,
"s": 27715,
"text": "Example: "
},
{
"code": null,
"e": 27727,
"s": 27725,
"text": "R"
},
{
"code": "library(dplyr)df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(grepl('Dev', roles))",
"e": 28048,
"s": 27727,
"text": null
},
{
"code": null,
"e": 28056,
"s": 28048,
"text": "Output:"
},
{
"code": null,
"e": 28131,
"s": 28056,
"text": " marks age roles\n1 30.2 22 Software Dev\n2 60.5 25 FrontEnd Dev"
},
{
"code": null,
"e": 28470,
"s": 28131,
"text": "Note the only difference in this code from the above approach is that here we are using a β!β not operator, this operator inverts the output provided by the grepl() function by converting TRUE to FALSE and vice versa, this in result only prints the rows which does not contain the patterns and filter outs the rows containing the pattern."
},
{
"code": null,
"e": 28524,
"s": 28470,
"text": "Syntax: df %>% filter(!grepl(βPatternβ, column_name))"
},
{
"code": null,
"e": 28536,
"s": 28524,
"text": "Parameters:"
},
{
"code": null,
"e": 28557,
"s": 28536,
"text": "df: Dataframe object"
},
{
"code": null,
"e": 28591,
"s": 28557,
"text": "grepl(): finds the pattern String"
},
{
"code": null,
"e": 28630,
"s": 28591,
"text": "βPatternβ: pattern(string) to be found"
},
{
"code": null,
"e": 28691,
"s": 28630,
"text": "column_name: pattern(string) will be searched in this column"
},
{
"code": null,
"e": 28701,
"s": 28691,
"text": "Example: "
},
{
"code": null,
"e": 28703,
"s": 28701,
"text": "R"
},
{
"code": "library(dplyr) df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(!grepl('Eng.', roles))",
"e": 29026,
"s": 28703,
"text": null
},
{
"code": null,
"e": 29034,
"s": 29026,
"text": "Output:"
},
{
"code": null,
"e": 29134,
"s": 29034,
"text": " marks age roles\n1 30.2 22 Software Dev\n2 40.3 23 Data Analyst\n3 60.5 25 FrontEnd Dev"
},
{
"code": null,
"e": 29396,
"s": 29134,
"text": "This code is also similar to the above approaches the only difference is that while passing the multiple patterns(string) in the grepl() function, the patterns are separated with the OR(β | β) operator. This prints all the rows containing the specified pattern."
},
{
"code": null,
"e": 29405,
"s": 29396,
"text": "Syntax: "
},
{
"code": null,
"e": 29458,
"s": 29405,
"text": "df %>% filter(grepl(βPatt.1 | Patt.2β, column_name))"
},
{
"code": null,
"e": 29467,
"s": 29458,
"text": "Example:"
},
{
"code": null,
"e": 29469,
"s": 29467,
"text": "R"
},
{
"code": "library(dplyr) df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(grepl('Dev|Eng.', roles))",
"e": 29796,
"s": 29469,
"text": null
},
{
"code": null,
"e": 29804,
"s": 29796,
"text": "Output:"
},
{
"code": null,
"e": 29933,
"s": 29804,
"text": " marks age roles\n1 20.1 21 Software Eng.\n2 30.2 22 Software Dev\n3 50.4 24 Data Eng.\n4 60.5 25 FrontEnd Dev"
},
{
"code": null,
"e": 30285,
"s": 29933,
"text": "This code is similar to the above approach, the only difference is that we are using β!β not operator, this operator inverts the output provided by the grepl() function by converting TRUE to FALSE and vice versa, this in result only prints the rows which do not contain the specified multiple patterns and filter outs the rows containing the patterns."
},
{
"code": null,
"e": 30294,
"s": 30285,
"text": "Syntax: "
},
{
"code": null,
"e": 30348,
"s": 30294,
"text": "df %>% filter(!grepl(βPatt.1 | Patt.2β, column_name))"
},
{
"code": null,
"e": 30358,
"s": 30348,
"text": "Example: "
},
{
"code": null,
"e": 30360,
"s": 30358,
"text": "R"
},
{
"code": "library(dplyr) df <- data.frame( marks = c(20.1, 30.2, 40.3, 50.4, 60.5), age = c(21:25), roles = c('Software Eng.', 'Software Dev', 'Data Analyst', 'Data Eng.', 'FrontEnd Dev')) df %>% filter(!grepl('Data|Front', roles))",
"e": 30690,
"s": 30360,
"text": null
},
{
"code": null,
"e": 30698,
"s": 30690,
"text": "Output:"
},
{
"code": null,
"e": 30776,
"s": 30698,
"text": " marks age roles\n1 20.1 21 Software Eng.\n2 30.2 22 Software Dev"
},
{
"code": null,
"e": 30783,
"s": 30776,
"text": "Picked"
},
{
"code": null,
"e": 30791,
"s": 30783,
"text": "R Dplyr"
},
{
"code": null,
"e": 30802,
"s": 30791,
"text": "R Language"
},
{
"code": null,
"e": 30900,
"s": 30802,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30952,
"s": 30900,
"text": "Change Color of Bars in Barchart using ggplot2 in R"
},
{
"code": null,
"e": 30987,
"s": 30952,
"text": "Group by function in R using Dplyr"
},
{
"code": null,
"e": 31025,
"s": 30987,
"text": "How to Change Axis Scales in R Plots?"
},
{
"code": null,
"e": 31083,
"s": 31025,
"text": "How to Split Column Into Multiple Columns in R DataFrame?"
},
{
"code": null,
"e": 31126,
"s": 31083,
"text": "Replace Specific Characters in String in R"
},
{
"code": null,
"e": 31175,
"s": 31126,
"text": "How to filter R DataFrame by values in a column?"
},
{
"code": null,
"e": 31212,
"s": 31175,
"text": "How to import an Excel File into R ?"
},
{
"code": null,
"e": 31229,
"s": 31212,
"text": "R - if statement"
},
{
"code": null,
"e": 31255,
"s": 31229,
"text": "Time Series Analysis in R"
}
] |
Assigning long values carefully in Java to avoid overflow - GeeksforGeeks
|
07 Dec, 2018
Predict the output of the following program
public class LongDivision { public static void main(String[] args) { final long MICROS_PER_DAY = 24 * 60 * 60 * 1000 * 1000; final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000; System.out.println(MICROS_PER_DAY / MILLIS_PER_DAY); }}
Solution:
Both the divisor and the dividend are of type long, which is easily large enough to hold either product without overflow. It seems, then, that the program must print 1000. Unfortunately, it prints 5. What exactly is going on here? The problem is that the computation of the constant MICROS_PER_DAY does overflow. Although the result of the computation fits in a long with room to spare, it doesn't fit in an int. The computation is performed entirely in int arithmetic, and only after the computation completes is the result promoted to a long. By then, it's too late: the computation has already overflowed. The promotion from int to long is a widening primitive conversion, which preserves the (incorrect) numerical value. This value is then divided by MILLIS_PER_DAY, which was computed correctly because it does fit in an int. The result of this division is 5. So why is the computation performed in int arithmetic? Because all the factors that are multiplied together are int values. When you multiply two int values, you get another int value. Java does not have target typing, a language feature wherein the type of the variable in which a result is to be stored influences the type of the computation. It's easy to fix the program by using a long literal in place of an int as the first factor in each product. This forces all subsequent computations in the expression to be done with long arithmetic. Although it is necessary to do this only in the expression for MICROS_PER_DAY, it is good form to do it in both products. Similarly, it isn't always necessary to use a long as the first value in a product, but it is good form to do so. Beginning both computations with long values makes it clear that they won't overflow. This program prints 1000 as expected:
public class LongDivision { public static void main(String[] args) { final long MICROS_PER_DAY = 24L * 60 * 60 * 1000 * 1000; final long MILLIS_PER_DAY = 24L * 60 * 60 * 1000; System.out.println(MICROS_PER_DAY / MILLIS_PER_DAY); }}
Output:
1000
The lesson is simple: When working with large numbers, watch out for overflow — it's a silent killer.
This article is contributed by Shubham Juneja. If you like GeeksforGeeks and would like to contribute, you can also write an article using contribute.geeksforgeeks.org or mail your article to contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.
Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
java-basics
Java-Data Types
Java-Output
java-puzzle
Java
Java
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Stream In Java
Constructors in Java
Exceptions in Java
Functional Interfaces in Java
Different ways of Reading a text file in Java
Generics in Java
Introduction to Java
Comparator Interface in Java with Examples
Internal Working of HashMap in Java
Strings in Java
|
[
{
"code": null,
"e": 25249,
"s": 25221,
"text": "\n07 Dec, 2018"
},
{
"code": null,
"e": 25293,
"s": 25249,
"text": "Predict the output of the following program"
},
{
"code": "public class LongDivision { public static void main(String[] args) { final long MICROS_PER_DAY = 24 * 60 * 60 * 1000 * 1000; final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000; System.out.println(MICROS_PER_DAY / MILLIS_PER_DAY); }}",
"e": 25540,
"s": 25293,
"text": null
},
{
"code": null,
"e": 25550,
"s": 25540,
"text": "Solution:"
},
{
"code": null,
"e": 27317,
"s": 25550,
"text": "Both the divisor and the dividend are of type long, which is easily large enough to hold either product without overflow. It seems, then, that the program must print 1000. Unfortunately, it prints 5. What exactly is going on here? The problem is that the computation of the constant MICROS_PER_DAY does overflow. Although the result of the computation fits in a long with room to spare, it doesnβt fit in an int. The computation is performed entirely in int arithmetic, and only after the computation completes is the result promoted to a long. By then, itβs too late: The computation has already overflowed.The promotion from int to long is a widening primitive conversion, which preserves the (incorrect) numerical value. This value is then divided by MILLIS_PER_DAY, which was computed correctly because it does fit in an int. The result of this division is 5. So why is the computation performed in int arithmetic? Because all the factors that are multiplied together are int values.When you multiply two int values, you get another int value. Java does not have target typing, a language feature wherein the type of the variable in which a result is to be stored influences the type of the computation. Itβs easy to fix the program by using a long literal in place of an int as the first factor in each product. This forces all subsequent computations in the expression to be done with long arithmetic. Although it is necessary to do this only in the expression for MICROS_PER_DAY, it is good form to do it in both products.Similarly, it isnβt always necessary to use a long as the first value in a product, but it is good form to do so. Beginning both computations with long values makes it clear that they wonβt overflow. This program prints 1000 as expected:"
},
{
"code": "public class LongDivision { public static void main(String[] args) { final long MICROS_PER_DAY = 24L * 60 * 60 * 1000 * 1000; final long MILLIS_PER_DAY = 24L * 60 * 60 * 1000; System.out.println(MICROS_PER_DAY / MILLIS_PER_DAY); }}",
"e": 27566,
"s": 27317,
"text": null
},
{
"code": null,
"e": 27574,
"s": 27566,
"text": "Output:"
},
{
"code": null,
"e": 27580,
"s": 27574,
"text": "1000\n"
},
{
"code": null,
"e": 27680,
"s": 27580,
"text": "The lesson is simple: When working with large numbers, watch out for overflowβitβs a silent killer."
},
{
"code": null,
"e": 27982,
"s": 27680,
"text": "This article is contributed by Shubham Juneja. If you like GeeksforGeeks and would like to contribute, you can also write an article using contribute.geeksforgeeks.org or mail your article to contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks."
},
{
"code": null,
"e": 28107,
"s": 27982,
"text": "Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above."
},
{
"code": null,
"e": 28119,
"s": 28107,
"text": "java-basics"
},
{
"code": null,
"e": 28135,
"s": 28119,
"text": "Java-Data Types"
},
{
"code": null,
"e": 28147,
"s": 28135,
"text": "Java-Output"
},
{
"code": null,
"e": 28159,
"s": 28147,
"text": "java-puzzle"
},
{
"code": null,
"e": 28164,
"s": 28159,
"text": "Java"
},
{
"code": null,
"e": 28169,
"s": 28164,
"text": "Java"
},
{
"code": null,
"e": 28267,
"s": 28169,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28282,
"s": 28267,
"text": "Stream In Java"
},
{
"code": null,
"e": 28303,
"s": 28282,
"text": "Constructors in Java"
},
{
"code": null,
"e": 28322,
"s": 28303,
"text": "Exceptions in Java"
},
{
"code": null,
"e": 28352,
"s": 28322,
"text": "Functional Interfaces in Java"
},
{
"code": null,
"e": 28398,
"s": 28352,
"text": "Different ways of Reading a text file in Java"
},
{
"code": null,
"e": 28415,
"s": 28398,
"text": "Generics in Java"
},
{
"code": null,
"e": 28436,
"s": 28415,
"text": "Introduction to Java"
},
{
"code": null,
"e": 28479,
"s": 28436,
"text": "Comparator Interface in Java with Examples"
},
{
"code": null,
"e": 28515,
"s": 28479,
"text": "Internal Working of HashMap in Java"
}
] |
PREVIOUSDAY function
|
Returns a table that contains a column of all dates representing the day that is previous to the first date in the dates column, in the current context.
PREVIOUSDAY (<dates>)
dates
A column that contains dates.
A table containing a single column of date values.
The dates parameter can be any of the following β
A reference to a date/time column.
A reference to a date/time column.
A table expression that returns a single column of date/time values.
A table expression that returns a single column of date/time values.
A Boolean expression that defines a single-column table of date/time values.
A Boolean expression that defines a single-column table of date/time values.
Constraints on Boolean expressions β
The expression cannot reference a calculated field.
The expression cannot reference a calculated field.
The expression cannot use CALCULATE function.
The expression cannot use CALCULATE function.
The expression cannot use any function that scans a table or returns a table, including aggregation functions.
The expression cannot use any function that scans a table or returns a table, including aggregation functions.
However, a Boolean expression can use any function that looks up a single value, or that calculates a scalar value.
Previous Day Sales: = CALCULATE (
SUM (Sales[Sales Amount]),PREVIOUSDAY (Sales[Date])
)
53 Lectures
5.5 hours
Abhay Gadiya
24 Lectures
2 hours
Randy Minder
26 Lectures
4.5 hours
Randy Minder
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2154,
"s": 2001,
"text": "Returns a table that contains a column of all dates representing the day that is previous to the first date in the dates column, in the current context."
},
{
"code": null,
"e": 2178,
"s": 2154,
"text": "PREVIOUSDAY (<dates>) \n"
},
{
"code": null,
"e": 2184,
"s": 2178,
"text": "dates"
},
{
"code": null,
"e": 2214,
"s": 2184,
"text": "A column that contains dates."
},
{
"code": null,
"e": 2265,
"s": 2214,
"text": "A table containing a single column of date values."
},
{
"code": null,
"e": 2315,
"s": 2265,
"text": "The dates parameter can be any of the following β"
},
{
"code": null,
"e": 2350,
"s": 2315,
"text": "A reference to a date/time column."
},
{
"code": null,
"e": 2385,
"s": 2350,
"text": "A reference to a date/time column."
},
{
"code": null,
"e": 2454,
"s": 2385,
"text": "A table expression that returns a single column of date/time values."
},
{
"code": null,
"e": 2523,
"s": 2454,
"text": "A table expression that returns a single column of date/time values."
},
{
"code": null,
"e": 2600,
"s": 2523,
"text": "A Boolean expression that defines a single-column table of date/time values."
},
{
"code": null,
"e": 2677,
"s": 2600,
"text": "A Boolean expression that defines a single-column table of date/time values."
},
{
"code": null,
"e": 2714,
"s": 2677,
"text": "Constraints on Boolean expressions β"
},
{
"code": null,
"e": 2766,
"s": 2714,
"text": "The expression cannot reference a calculated field."
},
{
"code": null,
"e": 2818,
"s": 2766,
"text": "The expression cannot reference a calculated field."
},
{
"code": null,
"e": 2864,
"s": 2818,
"text": "The expression cannot use CALCULATE function."
},
{
"code": null,
"e": 2910,
"s": 2864,
"text": "The expression cannot use CALCULATE function."
},
{
"code": null,
"e": 3021,
"s": 2910,
"text": "The expression cannot use any function that scans a table or returns a table, including aggregation functions."
},
{
"code": null,
"e": 3132,
"s": 3021,
"text": "The expression cannot use any function that scans a table or returns a table, including aggregation functions."
},
{
"code": null,
"e": 3248,
"s": 3132,
"text": "However, a Boolean expression can use any function that looks up a single value, or that calculates a scalar value."
},
{
"code": null,
"e": 3341,
"s": 3248,
"text": "Previous Day Sales: = CALCULATE ( \n SUM (Sales[Sales Amount]),PREVIOUSDAY (Sales[Date])\n) "
},
{
"code": null,
"e": 3376,
"s": 3341,
"text": "\n 53 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 3390,
"s": 3376,
"text": " Abhay Gadiya"
},
{
"code": null,
"e": 3423,
"s": 3390,
"text": "\n 24 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 3437,
"s": 3423,
"text": " Randy Minder"
},
{
"code": null,
"e": 3472,
"s": 3437,
"text": "\n 26 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 3486,
"s": 3472,
"text": " Randy Minder"
},
{
"code": null,
"e": 3493,
"s": 3486,
"text": " Print"
},
{
"code": null,
"e": 3504,
"s": 3493,
"text": " Add Notes"
}
] |
Python | Remove List elements containing given String character - GeeksforGeeks
|
22 Apr, 2020
Sometimes, while working with Python lists, we can have a problem in which we need to perform the task of removing all the elements of a list which contain at least one character of a given string. This can have applications in day-to-day programming. Let's discuss certain ways in which this task can be performed.
Method #1 : Using loop. This is a brute-force way in which this task can be performed. In this, we iterate over all list elements and check for the occurrence of any character using a loop.
# Python3 code to demonstrate working of # Remove List elements containing String character# Using loop # initializing listtest_list = ['567', '840', '649', '7342'] # initializing string test_str = '1237' # printing original listprint("The original list is : " + str(test_list)) # Remove List elements containing String character# Using loopres = []for sub in test_list: flag = 0 for ele in sub: if ele in test_str: flag = 1 if not flag: res.append(sub) # printing result print("The list after removal : " + str(res))
The original list is : ['567', '840', '649', '7342']
The list after removal : ['840', '649']
Method #2 : Using list comprehensionThis is another way to perform this task. This is similar to above method. In this we perform the task in similar way as above just as one liner.
# Python3 code to demonstrate working of # Remove List elements containing String character# Using list comprehension def check_pres(sub, test_str): for ele in sub: if ele in test_str: return 0 return 1 # initializing listtest_list = ['567', '840', '649', '7342'] # initializing string test_str = '1237' # printing original listprint("The original list is : " + str(test_list)) # Remove List elements containing String character# Using list comprehensionres = [ele for ele in test_list if check_pres(ele, test_str)] # printing result print("The list after removal : " + str(res))
The original list is : ['567', '840', '649', '7342']
The list after removal : ['840', '649']
Python list-programs
Python
Python Programs
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
Python Dictionary
Read a file line by line in Python
Enumerate() in Python
How to Install PIP on Windows ?
Iterate over a list in Python
Python program to convert a list to string
Defaultdict in Python
Python | Split string into list of characters
Python | Get dictionary keys as a list
Python | Convert a list to dictionary
|
[
{
"code": null,
"e": 25142,
"s": 25114,
"text": "\n22 Apr, 2020"
},
{
"code": null,
"e": 25441,
"s": 25142,
"text": "Sometimes, while working with Python lists, we can have problem in which we need to perform the task of removing all the elements of list which contain at least one character of String. This can have application in day-day programming. Lets discuss certain ways in which this task can be performed."
},
{
"code": null,
"e": 25620,
"s": 25441,
"text": "Method #1 : Using loopThis is brute force way in which this task can be performed. In this, we iterate for all list elements and check for occurrence of any character using loop."
},
{
"code": "# Python3 code to demonstrate working of # Remove List elements containing String character# Using loop # initializing listtest_list = ['567', '840', '649', '7342'] # initializing string test_str = '1237' # printing original listprint(\"The original list is : \" + str(test_list)) # Remove List elements containing String character# Using loopres = []for sub in test_list: flag = 0 for ele in sub: if ele in test_str: flag = 1 if not flag: res.append(sub) # printing result print(\"The list after removal : \" + str(res)) ",
"e": 26178,
"s": 25620,
"text": null
},
{
"code": null,
"e": 26272,
"s": 26178,
"text": "The original list is : ['567', '840', '649', '7342']\nThe list after removal : ['840', '649']\n"
},
{
"code": null,
"e": 26456,
"s": 26274,
"text": "Method #2 : Using list comprehensionThis is another way to perform this task. This is similar to above method. In this we perform the task in similar way as above just as one liner."
},
{
"code": "# Python3 code to demonstrate working of # Remove List elements containing String character# Using list comprehension def check_pres(sub, test_str): for ele in sub: if ele in test_str: return 0 return 1 # initializing listtest_list = ['567', '840', '649', '7342'] # initializing string test_str = '1237' # printing original listprint(\"The original list is : \" + str(test_list)) # Remove List elements containing String character# Using list comprehensionres = [ele for ele in test_list if check_pres(ele, test_str)] # printing result print(\"The list after removal : \" + str(res)) ",
"e": 27071,
"s": 26456,
"text": null
},
{
"code": null,
"e": 27165,
"s": 27071,
"text": "The original list is : ['567', '840', '649', '7342']\nThe list after removal : ['840', '649']\n"
},
{
"code": null,
"e": 27186,
"s": 27165,
"text": "Python list-programs"
},
{
"code": null,
"e": 27193,
"s": 27186,
"text": "Python"
},
{
"code": null,
"e": 27209,
"s": 27193,
"text": "Python Programs"
},
{
"code": null,
"e": 27307,
"s": 27209,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27316,
"s": 27307,
"text": "Comments"
},
{
"code": null,
"e": 27329,
"s": 27316,
"text": "Old Comments"
},
{
"code": null,
"e": 27347,
"s": 27329,
"text": "Python Dictionary"
},
{
"code": null,
"e": 27382,
"s": 27347,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 27404,
"s": 27382,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 27436,
"s": 27404,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 27466,
"s": 27436,
"text": "Iterate over a list in Python"
},
{
"code": null,
"e": 27509,
"s": 27466,
"text": "Python program to convert a list to string"
},
{
"code": null,
"e": 27531,
"s": 27509,
"text": "Defaultdict in Python"
},
{
"code": null,
"e": 27577,
"s": 27531,
"text": "Python | Split string into list of characters"
},
{
"code": null,
"e": 27616,
"s": 27577,
"text": "Python | Get dictionary keys as a list"
}
] |
Convert nested array to string - JavaScript
|
We are required to write a JavaScript function that takes in a nested array of literals and converts it to a string by concatenating all the values present in it to the string
const arr = [
'hello', [
'world', 'how', [
'are', 'you', [
'without', 'me'
]
]
]
];
Let's say the following is our nested array:
const arr = [
'hello', [
'world', 'how', [
'are', 'you', [
'without', 'me'
]
]
]
];
const arrayToString = (arr) => {
let str = '';
for(let i = 0; i < arr.length; i++){
if(Array.isArray(arr[i])){
str += arrayToString(arr[i]);
}else{
str += arr[i];
};
};
return str;
};
console.log(arrayToString(arr));
Following is the output in the console β
helloworldhowareyouwithoutme
|
[
{
"code": null,
"e": 1238,
"s": 1062,
"text": "We are required to write a JavaScript function that takes in a nested array of literals and converts it to a string by concatenating all the values present in it to the string"
},
{
"code": null,
"e": 1370,
"s": 1238,
"text": "const arr = [\n 'hello', [\n 'world', 'how', [\n 'are', 'you', [\n 'without', 'me'\n ]\n ]\n ]\n];"
},
{
"code": null,
"e": 1416,
"s": 1370,
"text": "Letβs say the following is our nested array β"
},
{
"code": null,
"e": 1813,
"s": 1416,
"text": "const arr = [\n 'hello', [\n 'world', 'how', [\n 'are', 'you', [\n 'without', 'me'\n ]\n ]\n ]\n];\nconst arrayToString = (arr) => {\n let str = '';\n for(let i = 0; i < arr.length; i++){\n if(Array.isArray(arr[i])){\n str += arrayToString(arr[i]);\n }else{\n str += arr[i];\n };\n };\n return str;\n};\nconsole.log(arrayToString(arr));"
},
{
"code": null,
"e": 1854,
"s": 1813,
"text": "Following is the output in the console β"
},
{
"code": null,
"e": 1883,
"s": 1854,
"text": "helloworldhowareyouwithoutme"
}
] |
ssh-keyscan - Unix, Linux Command
|
ssh-keyscan
uses non-blocking socket I/O to contact as many hosts as possible in
parallel, so it is very efficient.
The keys from a domain of 1,000
hosts can be collected in tens of seconds, even when some of those
hosts are down or do not run ssh.
For scanning, one does not need
login access to the machines that are being scanned, nor does the
scanning process involve any encryption.
The options are as follows:
1.2.3.4,1.2.4.4 name.my.domain,name,n.my.domain,n,1.2.3.4,1.2.4.4
Output format for rsa1 keys:
host-or-namelist bits exponent modulus
Output format for rsa and dsa keys:
host-or-namelist keytype base64-encoded-key
Where
keytype
is either
"ssh-rsa"
or
"ssh-dss".
/etc/ssh/ssh_known_hosts
$ ssh-keyscan hostname
Find all hosts from the file
ssh_hosts
which have new or different keys from those in the sorted file
ssh_known_hosts:
$ ssh-keyscan -t rsa,dsa -f ssh_hosts | \
sort -u - ssh_known_hosts | diff ssh_known_hosts -
Advertisements
129 Lectures
23 hours
Eduonix Learning Solutions
5 Lectures
4.5 hours
Frahaan Hussain
35 Lectures
2 hours
Pradeep D
41 Lectures
2.5 hours
Musab Zayadneh
46 Lectures
4 hours
GUHARAJANM
6 Lectures
4 hours
Uplatz
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 10967,
"s": 10577,
"text": "\nssh-keyscan\nuses non-blocking socket I/O to contact as many hosts as possible in\nparallel, so it is very efficient.\nThe keys from a domain of 1,000\nhosts can be collected in tens of seconds, even when some of those\nhosts are down or do not run ssh.\nFor scanning, one does not need\nlogin access to the machines that are being scanned, nor does the\nscanning process involve any encryption.\n"
},
{
"code": null,
"e": 10997,
"s": 10967,
"text": "\nThe options are as follows:\n"
},
{
"code": null,
"e": 11064,
"s": 10997,
"text": "1.2.3.4,1.2.4.4 name.my.domain,name,n.my.domain,n,1.2.3.4,1.2.4.4\n"
},
{
"code": null,
"e": 11096,
"s": 11064,
"text": "\n Output format for rsa1 keys:\n"
},
{
"code": null,
"e": 11136,
"s": 11096,
"text": "host-or-namelist bits exponent modulus\n"
},
{
"code": null,
"e": 11175,
"s": 11136,
"text": "\n Output format for rsa and dsa keys:\n"
},
{
"code": null,
"e": 11220,
"s": 11175,
"text": "host-or-namelist keytype base64-encoded-key\n"
},
{
"code": null,
"e": 11271,
"s": 11220,
"text": "\nWhere\n keytype\nis either\n\"ssh-rsa\"\nor\n\"ssh-dss\".\n"
},
{
"code": null,
"e": 11299,
"s": 11271,
"text": "\n /etc/ssh/ssh_known_hosts\n"
},
{
"code": null,
"e": 11323,
"s": 11299,
"text": "$ ssh-keyscan hostname\n"
},
{
"code": null,
"e": 11446,
"s": 11323,
"text": "\nFind all hosts from the file\n ssh_hosts\nwhich have new or different keys from those in the sorted file\n ssh_known_hosts:\n"
},
{
"code": null,
"e": 11548,
"s": 11446,
"text": "$ ssh-keyscan -t rsa,dsa -f ssh_hosts | \\\n sort -u - ssh_known_hosts | diff ssh_known_hosts -\n"
},
{
"code": null,
"e": 11565,
"s": 11548,
"text": "\nAdvertisements\n"
},
{
"code": null,
"e": 11600,
"s": 11565,
"text": "\n 129 Lectures \n 23 hours \n"
},
{
"code": null,
"e": 11628,
"s": 11600,
"text": " Eduonix Learning Solutions"
},
{
"code": null,
"e": 11662,
"s": 11628,
"text": "\n 5 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 11679,
"s": 11662,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 11712,
"s": 11679,
"text": "\n 35 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 11723,
"s": 11712,
"text": " Pradeep D"
},
{
"code": null,
"e": 11758,
"s": 11723,
"text": "\n 41 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 11774,
"s": 11758,
"text": " Musab Zayadneh"
},
{
"code": null,
"e": 11807,
"s": 11774,
"text": "\n 46 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 11819,
"s": 11807,
"text": " GUHARAJANM"
},
{
"code": null,
"e": 11851,
"s": 11819,
"text": "\n 6 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 11859,
"s": 11851,
"text": " Uplatz"
},
{
"code": null,
"e": 11866,
"s": 11859,
"text": " Print"
},
{
"code": null,
"e": 11877,
"s": 11866,
"text": " Add Notes"
}
] |
Time Series Forecasting with Autoregressive Processes | by Marco Peixeiro | Towards Data Science
|
In this hands-on tutorial, we will cover the topic of time series modelling with autoregressive processes.
This article will cover the following key elements in time series analysis:
autoregressive process
Yule-Walker equation
stationarity
Augmented Dickey-Fuller test
Make sure to have a Jupyter notebook ready to follow along. The code and the dataset is available here.
Let's get started!
Learn how to work with more complex models such as SARIMAX, VARMAX, and apply deep learning models (LSTM, CNN, ResNet, autoregressive LSTM) for time series analysis with Applied Time Series Analysis in Python!
An autoregressive model uses a linear combination of past values of the target to make forecasts. Of course, the regression is made against the target itself. Mathematically, an AR(p) model is expressed as:
Where:
p: is the order
c: is a constant
epsilon: noise
An AR(p) model is incredibly flexible and it can model many different types of time series patterns. This is easily visualized when we simulate autoregressive processes.
Usually, autoregressive models are applied to stationary time series only. This constrains the range of the parameters phi.
For example, an AR(1) model will constrain phi between -1 and 1. Those constraints become more complex as the order of the model increases, but they are automatically considered when modelling in Python.
Let's simulate an AR(2) process in Python.
We start off by importing some libraries. Not all will be used for the simulation, but they will be required for the rest of this tutorial.
from statsmodels.graphics.tsaplots import plot_pacffrom statsmodels.graphics.tsaplots import plot_acffrom statsmodels.tsa.arima_process import ArmaProcessfrom statsmodels.tsa.stattools import pacffrom statsmodels.regression.linear_model import yule_walkerfrom statsmodels.tsa.stattools import adfullerimport matplotlib.pyplot as pltimport numpy as np%matplotlib inline
We will use the ArmaProcess library to simulate the time series. It requires us to define our parameters.
We will simulate the following process:
Since we are dealing with an autoregressive model of order 2, we need to define the coefficient at lag 0, 1 and 2.
Also, we will cancel the effect of a moving average process.
Finally, we will generate 10 000 data points.
In code:
ar2 = np.array([1, 0.33, 0.5])ma = np.array([1])simulated_AR2_data = ArmaProcess(ar2, ma).generate_sample(nsample=10000)
We can plot the time series:
plt.figure(figsize=[10, 7.5]); # Set dimensions for figureplt.plot(simulated_AR2_data)plt.title("Simulated AR(2) Process")plt.show()
And you should get something similar to this:
Now, let's take a look at the autocorrelation plot (correlogram):
plot_acf(simulated_AR2_data);
You can see that the coefficient is slowly decaying. This means that it is unlikely a moving average process and it suggests that the time series can probably be modelled with an autoregressive process (which makes sense, since that is what we are simulating).
To make sure that this is right, let's plot the partial autocorrelation plot:
plot_pacf(simulated_AR2_data);
As you can see the coefficients are not significant after lag 2. Therefore, the partial autocorrelation plot is useful to determine the order of an AR(p) process.
You can also check the values of each coefficients by running:
pacf_coef_AR2 = pacf(simulated_AR2_data)print(pacf_coef_AR2)
Now, in a real project setting, it can be easy to find the order of an AR(p) process, but we need to find a way to estimate the coefficients phi.
To do so, we use the Yule-Walker equation. This equation allows us to estimate the coefficients of an AR(p) model, given that we know the order.
rho, sigma = yule_walker(simulated_AR2_data, 2, method='mle')print(f'rho: {-rho}')print(f'sigma: {sigma}')
As you can see, the Yule-Walker equation did a decent job at estimating our coefficients and got very close to 0.33 and 0.5.
Now, let's simulate an AR(3) process. Specifically, we will simulate:
Similarly to what was previously done, let's define our coefficients and generate 10 000 data points:
ar3 = np.array([1, 0.33, 0.5, 0.07])ma = np.array([1])simulated_AR3_data = ArmaProcess(ar3,ma).generate_sample(nsample=10000)
Then, we can visualize the time series:
plt.figure(figsize=[10, 7.5]); # Set dimensions for figureplt.plot(simulated_AR3_data)plt.title("Simulated AR(3) Process")plt.show()
And you should see something similar to:
Now, looking at the PACF and ACF:
plot_pacf(simulated_AR3_data);plot_acf(simulated_AR3_data);
You see that the coefficients are not significant after lag 3 for the PACF function as expected.
Finally, let's use the Yule-Walker equation to estimate the coefficients:
rho, sigma = yule_walker(simulated_AR3_data, 3, method='mle')print(f'rho: {-rho}')print(f'sigma: {sigma}')
Again, the estimations are fairly close to the actual values.
Now, let's apply our knowledge of autoregressive processes in a project setting.
The objective is to model the quarterly earnings per share (EPS) of the company Johnson&Johnson between 1960 and 1980.
First, let's read the dataset:
import pandas as pddata = pd.read_csv('jj.csv')data.head()
Now, the first five rows are not very useful for us. Let's plot the entire dataset to get a better visual representation.
plt.figure(figsize=[15, 7.5]); # Set dimensions for figureplt.scatter(data['date'], data['data'])plt.title('Quaterly EPS for Johnson & Johnson')plt.ylabel('EPS per share ($)')plt.xlabel('Date')plt.xticks(rotation=90)plt.grid(True)plt.show()
Awesome! Now we can see that there is a clear upward trend in the data. While this may be a good sign for the company, it is not good in terms of time series modelling, since it means that the time series is not stationary.
As aforementioned, the AR(p) process works only for stationary series.
Therefore, we must apply some transformations to our data to make it stationary.
In this case, we will take the log difference. This is equivalent to taking the log of each value, and subtracting the previous value.
# Take the log difference to make data stationarydata['data'] = np.log(data['data'])data['data'] = data['data'].diff()data = data.drop(data.index[0])data.head()
Plotting the transformed time series:
plt.figure(figsize=[15, 7.5]); # Set dimensions for figureplt.plot(data['data'])plt.title("Log Difference of Quaterly EPS for Johnson & Johnson")plt.show()
Now, it seems that we removed the trend. However, we have to be sure that our series is stationary before modelling with an AR(p) process.
We will thus use the augmented Dickey-Fuller test. This will give us the statistical confidence that our time series is indeed stationary.
ad_fuller_result = adfuller(data['data'])print(f'ADF Statistic: {ad_fuller_result[0]}')print(f'p-value: {ad_fuller_result[1]}')
Since we get a large negative ADF statistic and p-value smaller than 0.05, we can reject the null hypothesis and say that our time series is stationary.
Now, let's find the order of the process by plotting the PACF:
plot_pacf(data['data']);plot_acf(data['data']);
As you can see, after lag 4, the PACF coefficients are not significant anymore. Therefore, we will assume an autoregressive process of order 4.
Now, we will use this information to estimate the coefficients using the Yule-Walker equation:
# Try a AR(4) modelrho, sigma = yule_walker(data['data'], 4)print(f'rho: {-rho}')print(f'sigma: {sigma}')
Therefore, the function is approximated as:
Note that this equation models the transformed series.
Congratulations! You now understand what an autoregressive model is, how to recognize an autoregressive process, how to determine its order, and how to use it to model a real-life time series.
Sharpen your time series analysis skills and learn the latest best practices for time series analysis in Python:
Applied Time Series Analysis in Python
|
[
{
"code": null,
"e": 154,
"s": 47,
"text": "In this hands-on tutorial, we will cover the topic of time series modelling with autoregressive processes."
},
{
"code": null,
"e": 230,
"s": 154,
"text": "This article will cover the following key elements in time series analysis:"
},
{
"code": null,
"e": 253,
"s": 230,
"text": "autoregressive process"
},
{
"code": null,
"e": 274,
"s": 253,
"text": "Yule-Walker equation"
},
{
"code": null,
"e": 287,
"s": 274,
"text": "stationarity"
},
{
"code": null,
"e": 316,
"s": 287,
"text": "Augmented Dicker-Fuller test"
},
{
"code": null,
"e": 420,
"s": 316,
"text": "Make sure to have a Jupyter notebook ready to follow along. The code and the dataset is available here."
},
{
"code": null,
"e": 439,
"s": 420,
"text": "Letβs get started!"
},
{
"code": null,
"e": 649,
"s": 439,
"text": "Learn how to work with more complex models such as SARIMAX, VARMAX, and apply deep learning models (LSTM, CNN, ResNet, autoregressive LSTM) for time series analysis with Applied Time Series Analysis in Python!"
},
{
"code": null,
"e": 856,
"s": 649,
"text": "An autoregressive model uses a linear combination of past values of the target to make forecasts. Of course, the regression is made against the target itself. Mathematically, an AR(p) model is expressed as:"
},
{
"code": null,
"e": 863,
"s": 856,
"text": "Where:"
},
{
"code": null,
"e": 879,
"s": 863,
"text": "p: is the order"
},
{
"code": null,
"e": 896,
"s": 879,
"text": "c: is a constant"
},
{
"code": null,
"e": 911,
"s": 896,
"text": "epsilon: noise"
},
{
"code": null,
"e": 1080,
"s": 911,
"text": "AR(p) model is incredibly flexible and it can model a many different types of time series patterns. This is easily visualized when we simulate autoregressive processes."
},
{
"code": null,
"e": 1204,
"s": 1080,
"text": "Usually, autoregressive models are applied to stationary time series only. This constrains the range of the parameters phi."
},
{
"code": null,
"e": 1408,
"s": 1204,
"text": "For example, an AR(1) model will constrain phi between -1 and 1. Those constraints become more complex as the order of the model increases, but they are automatically considered when modelling in Python."
},
{
"code": null,
"e": 1451,
"s": 1408,
"text": "Letβs simulate an AR(2) process in Python."
},
{
"code": null,
"e": 1591,
"s": 1451,
"text": "We start off by importing some libraries. Not all will be used for the simulation, but they will be required for the rest of this tutorial."
},
{
"code": null,
"e": 1960,
"s": 1591,
"text": "from statsmodels.graphics.tsaplots import plot_pacffrom statsmodels.graphics.tsaplots import plot_acffrom statsmodels.tsa.arima_process import ArmaProcessfrom statsmodels.tsa.stattools import pacffrom statsmodels.regression.linear_model import yule_walkerfrom statsmodels.tsa.stattools import adfullerimport matplotlib.pyplot as pltimport numpy as np%matplotlib inline"
},
{
"code": null,
"e": 2066,
"s": 1960,
"text": "We will use the ArmaProcess library to simulate the time series. It requires us to define our parameters."
},
{
"code": null,
"e": 2106,
"s": 2066,
"text": "We will simulate the following process:"
},
{
"code": null,
"e": 2221,
"s": 2106,
"text": "Since we are dealing with an autoregressive model of order 2, we need to define the coefficient at lag 0, 1 and 2."
},
{
"code": null,
"e": 2282,
"s": 2221,
"text": "Also, we will cancel the effect of a moving average process."
},
{
"code": null,
"e": 2328,
"s": 2282,
"text": "Finally, we will generate 10 000 data points."
},
{
"code": null,
"e": 2337,
"s": 2328,
"text": "In code:"
},
{
"code": null,
"e": 2458,
"s": 2337,
"text": "ar2 = np.array([1, 0.33, 0.5])ma = np.array([1])simulated_AR2_data = ArmaProcess(ar2, ma).generate_sample(nsample=10000)"
},
{
"code": null,
"e": 2487,
"s": 2458,
"text": "We can plot the time series:"
},
{
"code": null,
"e": 2620,
"s": 2487,
"text": "plt.figure(figsize=[10, 7.5]); # Set dimensions for figureplt.plot(simulated_AR2_data)plt.title(\"Simulated AR(2) Process\")plt.show()"
},
{
"code": null,
"e": 2666,
"s": 2620,
"text": "And you should get something similar to this:"
},
{
"code": null,
"e": 2732,
"s": 2666,
"text": "Now, letβs take a look at the autocorrelation plot (correlogram):"
},
{
"code": null,
"e": 2762,
"s": 2732,
"text": "plot_acf(simulated_AR2_data);"
},
{
"code": null,
"e": 3019,
"s": 2762,
"text": "You can see that the coefficient is slowly decaying. This means that it is unlikely a moving average process and it suggests that the time series can probably be modelled with an autoregressive process (which makes sense since that what we are simulating)."
},
{
"code": null,
"e": 3097,
"s": 3019,
"text": "To make sure that this is right, letβs plot the partial autocorrelation plot:"
},
{
"code": null,
"e": 3128,
"s": 3097,
"text": "plot_pacf(simulated_AR2_data);"
},
{
"code": null,
"e": 3291,
"s": 3128,
"text": "As you can see the coefficients are not significant after lag 2. Therefore, the partial autocorrelation plot is useful to determine the order of an AR(p) process."
},
{
"code": null,
"e": 3354,
"s": 3291,
"text": "You can also check the values of each coefficients by running:"
},
{
"code": null,
"e": 3415,
"s": 3354,
"text": "pacf_coef_AR2 = pacf(simulated_AR2_data)print(pacf_coef_AR2)"
},
{
"code": null,
"e": 3561,
"s": 3415,
"text": "Now, in a real project setting, it can be easy to find the order of an AR(p) process, but we need to find a way to estimate the coefficients phi."
},
{
"code": null,
"e": 3707,
"s": 3561,
"text": "To do so, we use the Yule-Walker equation. This equations allows us to estimate the coefficients of an AR(p) model, given that we know the order."
},
{
"code": null,
"e": 3814,
"s": 3707,
"text": "rho, sigma = yule_walker(simulated_AR2_data, 2, method='mle')print(f'rho: {-rho}')print(f'sigma: {sigma}')"
},
{
"code": null,
"e": 3939,
"s": 3814,
"text": "As you can see, the Yule-Walker equation did a decent job at estimating our coefficients and got very close to 0.33 and 0.5."
},
{
"code": null,
"e": 4009,
"s": 3939,
"text": "Now, letβs simulate an AR(3) process. Specifically, we will simulate:"
},
{
"code": null,
"e": 4111,
"s": 4009,
"text": "Similarly to what was previously done, letβs define our coefficients and generate 10 000 data points:"
},
{
"code": null,
"e": 4237,
"s": 4111,
"text": "ar3 = np.array([1, 0.33, 0.5, 0.07])ma = np.array([1])simulated_AR3_data = ArmaProcess(ar3,ma).generate_sample(nsample=10000)"
},
{
"code": null,
"e": 4277,
"s": 4237,
"text": "Then, we can visualize the time series:"
},
{
"code": null,
"e": 4410,
"s": 4277,
"text": "plt.figure(figsize=[10, 7.5]); # Set dimensions for figureplt.plot(simulated_AR3_data)plt.title(\"Simulated AR(3) Process\")plt.show()"
},
{
"code": null,
"e": 4451,
"s": 4410,
"text": "And you should see something similar to:"
},
{
"code": null,
"e": 4485,
"s": 4451,
"text": "Now, looking at the PACF and ACF:"
},
{
"code": null,
"e": 4545,
"s": 4485,
"text": "plot_pacf(simulated_AR3_data);plot_acf(simulated_AR3_data);"
},
{
"code": null,
"e": 4642,
"s": 4545,
"text": "You see that the coefficients are not significant after lag 3 for the PACF function as expected."
},
{
"code": null,
"e": 4716,
"s": 4642,
"text": "Finally, letβs use the Yule-Walker equation to estimate the coefficients:"
},
{
"code": null,
"e": 4823,
"s": 4716,
"text": "rho, sigma = yule_walker(simulated_AR3_data, 3, method='mle')print(f'rho: {-rho}')print(f'sigma: {sigma}')"
},
{
"code": null,
"e": 4885,
"s": 4823,
"text": "Again, the estimations are fairly close to the actual values."
},
{
"code": null,
"e": 4966,
"s": 4885,
"text": "Now, letβs apply our knowledge of autoregressive processes in a project setting."
},
{
"code": null,
"e": 5085,
"s": 4966,
"text": "The objective is to model the quarterly earnings per share (EPS) of the company Johnson&Johnson between 1960 and 1980."
},
{
"code": null,
"e": 5116,
"s": 5085,
"text": "First, letβs read the dataset:"
},
{
"code": null,
"e": 5175,
"s": 5116,
"text": "import pandas as pddata = pd.read_csv('jj.csv')data.head()"
},
{
"code": null,
"e": 5297,
"s": 5175,
"text": "Now, the first five rows are not very useful for us. Letβs plot the entire dataset to get a better visual representation."
},
{
"code": null,
"e": 5538,
"s": 5297,
"text": "plt.figure(figsize=[15, 7.5]); # Set dimensions for figureplt.scatter(data['date'], data['data'])plt.title('Quaterly EPS for Johnson & Johnson')plt.ylabel('EPS per share ($)')plt.xlabel('Date')plt.xticks(rotation=90)plt.grid(True)plt.show()"
},
{
"code": null,
"e": 5756,
"s": 5538,
"text": "Awesome! Now we can the there is clear upwards trend in the data. While this may be a good sign for the company, it is not good in terms of time series modelling, since it means that the time series is not stationary."
},
{
"code": null,
"e": 5827,
"s": 5756,
"text": "As aforementioned, the AR(p) process works only for stationary series."
},
{
"code": null,
"e": 5908,
"s": 5827,
"text": "Therefore, we must apply some transformations to our data to make it stationary."
},
{
"code": null,
"e": 6040,
"s": 5908,
"text": "In this case, will take the log difference. This is equivalent to taking the log of each value, and subtracting the previous value."
},
{
"code": null,
"e": 6201,
"s": 6040,
"text": "# Take the log difference to make data stationarydata['data'] = np.log(data['data'])data['data'] = data['data'].diff()data = data.drop(data.index[0])data.head()"
},
{
"code": null,
"e": 6239,
"s": 6201,
"text": "Plotting the transformed time series:"
},
{
"code": null,
"e": 6395,
"s": 6239,
"text": "plt.figure(figsize=[15, 7.5]); # Set dimensions for figureplt.plot(data['data'])plt.title(\"Log Difference of Quaterly EPS for Johnson & Johnson\")plt.show()"
},
{
"code": null,
"e": 6534,
"s": 6395,
"text": "Now, it seems that we removed the trend. However, we have to be sure that our series is stationary before modelling with an AR(p) process."
},
{
"code": null,
"e": 6673,
"s": 6534,
"text": "We will thus use the augmented Dicker-Fuller test. This will give us the statistical confidence that our time series is indeed stationary."
},
{
"code": null,
"e": 6801,
"s": 6673,
"text": "ad_fuller_result = adfuller(data['data'])print(f'ADF Statistic: {ad_fuller_result[0]}')print(f'p-value: {ad_fuller_result[1]}')"
},
{
"code": null,
"e": 6954,
"s": 6801,
"text": "Since we get a large negative ADF statistic and p-value smaller than 0.05, we can reject the null hypothesis and say that our time series is stationary."
},
{
"code": null,
"e": 7017,
"s": 6954,
"text": "Now, letβs find the order of the process by plotting the PACF:"
},
{
"code": null,
"e": 7065,
"s": 7017,
"text": "plot_pacf(data['data']);plot_acf(data['data']);"
},
{
"code": null,
"e": 7209,
"s": 7065,
"text": "As you can see, after lag 4, the PACF coefficients are not significant anymore. Therefore, we will assume an autoregressive process of order 4."
},
{
"code": null,
"e": 7304,
"s": 7209,
"text": "Now, we will use this information to estimate the coefficients using the Yule-Walker equation:"
},
{
"code": null,
"e": 7410,
"s": 7304,
"text": "# Try a AR(4) modelrho, sigma = yule_walker(data['data'], 4)print(f'rho: {-rho}')print(f'sigma: {sigma}')"
},
{
"code": null,
"e": 7454,
"s": 7410,
"text": "Therefore, the function is approximated as:"
},
{
"code": null,
"e": 7509,
"s": 7454,
"text": "Note that this equation models the transformed series."
},
{
"code": null,
"e": 7702,
"s": 7509,
"text": "Congratulations! You now understand what an autoregressive model is, how to recognize an autoregressive process, how to determine its order, and how to use it to model a real-life time series."
},
{
"code": null,
"e": 7815,
"s": 7702,
"text": "Sharpen your time series analysis skills and learn the latest best practices for time series analysis in Python:"
}
] |
Developing and Deploying a Machine Learning Model on Vertex AI using Python | by Lak Lakshmanan | Towards Data Science
|
Write ML pipelines that will make your MLOps team happy: follow a clean separation of responsibility between model code and ops code. This article shows you how to do that.
In my two previous articles on Vertex AI, I showed you how to use the web console to create and deploy an AutoML model and how to take a TensorFlow model that you somehow trained and deploy it to Vertex AI. But both those approaches don't really scale to hundreds of models and large teams.
When you create an AutoML model using the Google Cloud web console, you get back an endpoint that can be monitored and on which you can set up continuous evaluation. If you find that the model is drifting, retraining it on new data automatically is difficult — you don't want to wake up at 2am to use the user interface to train the model. It would be much better if you could train and deploy the model using just code. Code is much easier for your MLOps team to automate.
Taking a TensorFlow model that you trained in your Jupyter notebook and deploying the SavedModel to Vertex AI has the same problem. Retraining is going to be difficult because the ops team will have to set up all of the ops and monitoring and scheduling on top of something that is really clunky and totally non-minimal.
For retraining, it's much better for the entire process — from dataset creation to training to deployment — to be driven by code. Do this, and your operations team will thank you for making their life easy in terms of clearly separating out the model code from the ops code, and expressing everything in Python rather than in notebooks.
How to get this separation in Vertex AI is what I'm going to show you in this article.
Jupyter notebooks are great for development, but I strongly recommend against putting those notebooks directly into production (Yes, I do know about Papermill).
What I recommend is that you convert your initial prototyping model code into a Python file and then continue all development in it. Throw away the Jupyter notebook. You will invoke the extracted (and maintained) Python code from a scratch notebook for future experimentation.
You can see my example in https://github.com/GoogleCloudPlatform/data-science-on-gcp/tree/edition2/09_vertexai. See the files model.py and train_on_vertexai.py and use them to follow along.
The file model.py contains all the Keras model code from my Jupyter notebook (flights_model_tf2.ipynb in the same GitHub directory). The difference is that it is executable, and much of the notebook code is extracted into a function called train_and_evaluate.py:
def train_and_evaluate(train_data_pattern, eval_data_pattern, test_data_pattern, export_dir, output_dir): ... train_dataset = read_dataset(train_data_pattern, train_batch_size) eval_dataset = read_dataset(eval_data_pattern, eval_batch_size, tf.estimator.ModeKeys.EVAL, num_eval_examples) model = create_model() history = model.fit(train_dataset, validation_data=eval_dataset, epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=[cp_callback]) # export logging.info('Exporting to {}'.format(export_dir)) tf.saved_model.save(model, export_dir)
There are three key things to note:
The data is read from URIs specified by train_data_pattern, eval_data_pattern, and test_data_pattern for training, validation, and test datasets respectively.The model creation code is extracted out to a function called create_modelThe model is written out to export_dir, and any other intermediate outputs are written to output_dir.
The data is read from URIs specified by train_data_pattern, eval_data_pattern, and test_data_pattern for training, validation, and test datasets respectively.
The model creation code is extracted out to a function called create_model
The model is written out to export_dir, and any other intermediate outputs are written to output_dir.
The data patterns and output directories are obtained in model.py from environment variables:
OUTPUT_DIR = 'gs://{}/ch9/trained_model'.format(BUCKET) OUTPUT_MODEL_DIR = os.getenv("AIP_MODEL_DIR") TRAIN_DATA_PATTERN = os.getenv("AIP_TRAINING_DATA_URI") EVAL_DATA_PATTERN = os.getenv("AIP_VALIDATION_DATA_URI") TEST_DATA_PATTERN = os.getenv("AIP_TEST_DATA_URI")
This is very important, because it is the contract between your code and Vertex AI and is needed in order for all the automagical things to happen.
Chances are, however, that you will need to run this code outside Vertex AI (for example, during development). In such a case, the environment variable will not be set, and so the variables will all be None. Look for this case, and set them to values in your development environment:
if not OUTPUT_MODEL_DIR: OUTPUT_MODEL_DIR = os.path.join(OUTPUT_DIR, 'export/flights_{}'.format(time.strftime("%Y%m%d-%H%M%S"))) if not TRAIN_DATA_PATTERN: TRAIN_DATA_PATTERN = 'gs://{}/ch9/data/train*'.format(BUCKET) if not EVAL_DATA_PATTERN: EVAL_DATA_PATTERN = 'gs://{}/ch9/data/eval*'.format(BUCKET)
These files can be very small because they are only for development. Actual production runs will run inside Vertex AI where the environment variables will be set.
Once you finish writing model.py, make sure it works:
python3 model.py --bucket <bucket-name>
Now, you are ready to invoke it from a Vertex AI pipeline.
The training pipeline (See train_on_vertexai.py) needs to do five things in code:
Load up a managed dataset in Vertex AISet up training infrastructure to run model.pyRun model.py, and pass in the managed dataset.Find the endpoint to which to deploy the model.Deploy the model to the endpoint
Load up a managed dataset in Vertex AI
Set up training infrastructure to run model.py
Run model.py, and pass in the managed dataset.
Find the endpoint to which to deploy the model.
Deploy the model to the endpoint
1. Managed Dataset
This is how to load up a tabular dataset (options exist for image, text, etc. datasets, and for tabular data in BigQuery):
data_set = aiplatform.TabularDataset.create( display_name='data-{}'.format(ENDPOINT_NAME), gcs_source=['gs://{}/ch9/data/all.csv'.format(BUCKET)])
Note that I am passing in *all* of the data. Vertex AI will take care of splitting the data into train, validate, and test datasets and sending it to the training program.
2. Training setup
Next, create a training job passing in model.py, the training container image, and the serving container image:
model_display_name = '{}-{}'.format(ENDPOINT_NAME, timestamp)job = aiplatform.CustomTrainingJob( display_name='train-{}'.format(model_display_name), script_path="model.py", container_uri=train_image, requirements=[], # any extra Python packages model_serving_container_image_uri=deploy_image)
(for why you want to assign a timestamped name to the model, please see How to Deploy a TensorFlow Model to Vertex AI)
3. Run training job
Running the job involves running model.py on the managed dataset on some hardware:
model = job.run( dataset=data_set, model_display_name=model_display_name, args=['--bucket', BUCKET], replica_count=1, machine_type='n1-standard-4', accelerator_type=aip.AcceleratorType.NVIDIA_TESLA_T4.name, accelerator_count=1, sync=develop_mode )
4. Find endpoint
We want to deploy to a preexisting endpoint (see How to Deploy a TensorFlow Model to Vertex AI for an explanation of what an endpoint is). So, find an existing endpoint or create one if not:
endpoints = aiplatform.Endpoint.list( filter='display_name="{}"'.format(ENDPOINT_NAME), order_by='create_time desc', project=PROJECT, location=REGION, ) if len(endpoints) > 0: endpoint = endpoints[0] # most recently created else: endpoint = aiplatform.Endpoint.create( display_name=ENDPOINT_NAME, project=PROJECT, location=REGION )
5. Deploy model
Finally, deploy the model to the endpoint:
model.deploy( endpoint=endpoint, traffic_split={"0": 100}, machine_type='n1-standard-2', min_replica_count=1, max_replica_count=1 )
That's it! Now, you have a Python program that you can run anytime you want to retrain and/or deploy the trained model. Of course, the MLOps person will typically not replace the model wholesale, but send only a small fraction of the traffic to the model. They'll probably also set up monitoring and continuous evaluation on the endpoint in Vertex AI. But you've made it easy for them to do that.
What changes in the above pipeline if I want to use AutoML instead of my custom training job? Well, I don't need my own model.py of course. So, instead of the CustomTrainingJob, I'll use AutoML.
Setting and running the training job (Steps 3 and 4 above) now become:
def train_automl_model(data_set, timestamp): # train model_display_name = '{}-{}'.format(ENDPOINT_NAME, timestamp) job = aiplatform.AutoMLTabularTrainingJob( display_name='train-{}'.format(model_display_name), optimization_prediction_type='classification' ) model = job.run( dataset=data_set, target_column='ontime', model_display_name=model_display_name, budget_milli_node_hours=(300 if develop_mode else 2000), disable_early_stopping=False ) return job, model
That's the only change! The rest of the pipeline stays the same. That's what we mean when we say that you have a unified platform for ML development.
In fact, you can similarly change the ML framework to PyTorch or to sklearn or XGBoost and, as far as the MLOps people are concerned, there are only minimal changes. In my train_on_vertexai.py, I switch between custom Keras code and AutoML with a command-line parameter.
By default, Vertex AI does a fractional split of the data (80% to training, 10% each for validation and testing). What if you want to control the split? There are several options available (based on time, etc.).
Suppose you want to add a column to your dataset that controls the split, you can do this when creating the data:
CREATE OR REPLACE TABLE dsongcp.flights_all_data AS
SELECT IF(arr_delay < 15, 1.0, 0.0) AS ontime, dep_delay, taxi_out, ... IF (is_train_day = 'True', IF(ABS(MOD(FARM_FINGERPRINT(CAST(f.FL_DATE AS STRING)), 100)) < 60, 'TRAIN', 'VALIDATE'), 'TEST') AS data_split
FROM dsongcp.flights_tzcorr f
...
Basically, there is a column that I'm calling data_split that takes the values TRAIN, VALIDATE or TEST. So, every row in the managed dataset is assigned to one of these three splits.
Then, when I'm training the job (whether it's a custom model or AutoML), I specify what the predefined splitting column is:
model = job.run( dataset=data_set, # See https://googleapis.dev/python/aiplatform/latest/aiplatform.html# predefined_split_column_name='data_split', model_display_name=model_display_name,
That's it! Vertex AI will take care of the rest, including assigning all the necessary metadata to the models being trained.
Bottom-line: MLOps is getting easier as more and more of it becomes automatically managed. Lean into this, by following a clean separation of responsibilities in your code.
Enjoy!
Giving Vertex AI, the New Unified ML Platform on Google Cloud, a Spin:Why do we need it, how good is the code-free ML training, really, and what does all this mean for data science jobs?How to Deploy a TensorFlow Model to Vertex AI: Working with saved models and endpoints in Vertex AIDeveloping and Deploying a Machine Learning Model on Vertex AI using Python: Write training pipelines that will make your MLOps team happyHow to build an MLOps pipeline for hyperparameter tuning in Vertex AI:Best practices to set up your model and orchestrator for hyperparameter tuning
Giving Vertex AI, the New Unified ML Platform on Google Cloud, a Spin:Why do we need it, how good is the code-free ML training, really, and what does all this mean for data science jobs?
How to Deploy a TensorFlow Model to Vertex AI: Working with saved models and endpoints in Vertex AI
Developing and Deploying a Machine Learning Model on Vertex AI using Python: Write training pipelines that will make your MLOps team happy
How to build an MLOps pipeline for hyperparameter tuning in Vertex AI:Best practices to set up your model and orchestrator for hyperparameter tuning
|
[
{
"code": null,
"e": 343,
"s": 171,
"text": "Write ML Pipelines that will make your MLOps team happy: follow a clean separation of responsibility between model code and ops code. This article show you how to do that."
},
{
"code": null,
"e": 634,
"s": 343,
"text": "In my two previous articles on Vertex AI, I showed you how to use the web console to create and deploy an AutoML model and how to take a TensorFlow model that you somehow trained and deploy it to Vertex AI. But both those approaches donβt really scale to hundreds of models and large teams."
},
{
"code": null,
"e": 1109,
"s": 634,
"text": "When you create an AutoML model using the Google Cloud web console, you get back an end-point that can be monitored and on which you can set up continuous evaluation. If you find that the model is drifting, retraining it on new data automatically is difficult β you donβt want to wake up at 2am to use the user interface to train the model. It would be much better if you could train and deploy the model using just code. Code is much easier for your MLOps team to automate."
},
{
"code": null,
"e": 1430,
"s": 1109,
"text": "Taking a TensorFlow model that you trained in your Jupyter notebook and deploying the SavedModel to Vertex AI has the same problem. Retraining is going to be difficult because the ops team will have to set up all of the ops and monitoring and scheduling on top of something that is really clunky and totally non-minimal."
},
{
"code": null,
"e": 1765,
"s": 1430,
"text": "For retraining, itβs much better for the entire process β from dataset creation to training to deployment to be driven by code. Do this, and your operations team will thank you for making their life easy in terms of clearly separating out the model code from the ops code, and expressing everything in Python rather than in notebooks."
},
{
"code": null,
"e": 1852,
"s": 1765,
"text": "How to get this separation in Vertex AI is what Iβm going to show you in this article."
},
{
"code": null,
"e": 2013,
"s": 1852,
"text": "Jupyter notebooks are great for development, but I strongly recommend against putting those notebooks directly into production (Yes, I do know about Papermill)."
},
{
"code": null,
"e": 2290,
"s": 2013,
"text": "What I recommend is that you convert your initial prototyping model code into a Python file and then continue all development in it. Throw away the Jupyter notebook. You will invoke the extracted (and maintained) Python code from a scratch notebook for future experimentation."
},
{
"code": null,
"e": 2480,
"s": 2290,
"text": "You can see my example in https://github.com/GoogleCloudPlatform/data-science-on-gcp/tree/edition2/09_vertexai. See the files model.py and train_on_vertexai.py and use them to follow along."
},
{
"code": null,
"e": 2743,
"s": 2480,
"text": "The file model.py contains all the Keras model code from my Jupyter notebook (flights_model_tf2.ipynb in the same GitHub directory). The difference is that it is executable, and much of the notebook code is extracted into a function called train_and_evaluate.py:"
},
{
"code": null,
"e": 3407,
"s": 2743,
"text": "def train_and_evaluate(train_data_pattern, eval_data_pattern, test_data_pattern, export_dir, output_dir): ... train_dataset = read_dataset(train_data_pattern, train_batch_size) eval_dataset = read_dataset(eval_data_pattern, eval_batch_size, tf.estimator.ModeKeys.EVAL, num_eval_examples) model = create_model() history = model.fit(train_dataset, validation_data=eval_dataset, epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=[cp_callback]) # export logging.info('Exporting to {}'.format(export_dir)) tf.saved_model.save(model, export_dir)"
},
{
"code": null,
"e": 3443,
"s": 3407,
"text": "There are three key things to note:"
},
{
"code": null,
"e": 3777,
"s": 3443,
"text": "The data is read from URIs specified by train_data_pattern, eval_data_pattern, and test_data_pattern for training, validation, and test datasets respectively.The model creation code is extracted out to a function called create_modelThe model is written out to export_dir, and any other intermediate outputs are written to output_dir."
},
{
"code": null,
"e": 3936,
"s": 3777,
"text": "The data is read from URIs specified by train_data_pattern, eval_data_pattern, and test_data_pattern for training, validation, and test datasets respectively."
},
{
"code": null,
"e": 4011,
"s": 3936,
"text": "The model creation code is extracted out to a function called create_model"
},
{
"code": null,
"e": 4113,
"s": 4011,
"text": "The model is written out to export_dir, and any other intermediate outputs are written to output_dir."
},
{
"code": null,
"e": 4207,
"s": 4113,
"text": "The data patterns and output directories are obtained in model.py from environment variables:"
},
{
"code": null,
"e": 4489,
"s": 4207,
"text": " OUTPUT_DIR = 'gs://{}/ch9/trained_model'.format(BUCKET) OUTPUT_MODEL_DIR = os.getenv(\"AIP_MODEL_DIR\") TRAIN_DATA_PATTERN = os.getenv(\"AIP_TRAINING_DATA_URI\") EVAL_DATA_PATTERN = os.getenv(\"AIP_VALIDATION_DATA_URI\") TEST_DATA_PATTERN = os.getenv(\"AIP_TEST_DATA_URI\")"
},
{
"code": null,
"e": 4637,
"s": 4489,
"text": "This is very important, because it is the contract between your code and Vertex AI and is needed in order for all the automagical things to happen."
},
{
"code": null,
"e": 4921,
"s": 4637,
"text": "Chances are, however, that you will need to run this code outside Vertex AI (for example, during development). In such a case, the environment variable will not be set, and so the variables will all be None. Look for this case, and set them to values in your development environment:"
},
{
"code": null,
"e": 5295,
"s": 4921,
"text": " if not OUTPUT_MODEL_DIR: OUTPUT_MODEL_DIR = os.path.join(OUTPUT_DIR, 'export/flights_{}'.format(time.strftime(\"%Y%m%d-%H%M%S\"))) if not TRAIN_DATA_PATTERN: TRAIN_DATA_PATTERN = 'gs://{}/ch9/data/train*'.format(BUCKET) if not EVAL_DATA_PATTERN: EVAL_DATA_PATTERN = 'gs://{}/ch9/data/eval*'.format(BUCKET)"
},
{
"code": null,
"e": 5458,
"s": 5295,
"text": "These files can be very small because they are only for development. Actual production runs will run inside Vertex AI where the environment variables will be set."
},
{
"code": null,
"e": 5512,
"s": 5458,
"text": "Once you finish writing model.py, make sure it works:"
},
{
"code": null,
"e": 5553,
"s": 5512,
"text": "python3 model.py --bucket <bucket-name>"
},
{
"code": null,
"e": 5612,
"s": 5553,
"text": "Now, you are ready to invoke it from a Vertex AI pipeline."
},
{
"code": null,
"e": 5694,
"s": 5612,
"text": "The training pipeline (See train_on_vertexai.py) needs to do five things in code:"
},
{
"code": null,
"e": 5904,
"s": 5694,
"text": "Load up a managed dataset in Vertex AISet up training infrastructure to run model.pyRun model.py, and pass in the managed dataset.Find the endpoint to which to deploy the model.Deploy the model to the endpoint"
},
{
"code": null,
"e": 5943,
"s": 5904,
"text": "Load up a managed dataset in Vertex AI"
},
{
"code": null,
"e": 5990,
"s": 5943,
"text": "Set up training infrastructure to run model.py"
},
{
"code": null,
"e": 6037,
"s": 5990,
"text": "Run model.py, and pass in the managed dataset."
},
{
"code": null,
"e": 6085,
"s": 6037,
"text": "Find the endpoint to which to deploy the model."
},
{
"code": null,
"e": 6118,
"s": 6085,
"text": "Deploy the model to the endpoint"
},
{
"code": null,
"e": 6137,
"s": 6118,
"text": "1. Managed Dataset"
},
{
"code": null,
"e": 6260,
"s": 6137,
"text": "This is how to load up a tabular dataset (options exist for image, text, etc. datasets, and for tabular data in BigQuery):"
},
{
"code": null,
"e": 6421,
"s": 6260,
"text": "data_set = aiplatform.TabularDataset.create( display_name='data-{}'.format(ENDPOINT_NAME), gcs_source=['gs://{}/ch9/data/all.csv'.format(BUCKET)])"
},
{
"code": null,
"e": 6593,
"s": 6421,
"text": "Note that I am passing in *all* of the data. Vertex AI will take care of splitting the data into train, validate, and test datasets and sending it to the training program."
},
{
"code": null,
"e": 6611,
"s": 6593,
"text": "2. Training setup"
},
{
"code": null,
"e": 6723,
"s": 6611,
"text": "Next, create a training job passing in model.py, the training container image, and the serving container image:"
},
{
"code": null,
"e": 7052,
"s": 6723,
"text": "model_display_name = '{}-{}'.format(ENDPOINT_NAME, timestamp)job = aiplatform.CustomTrainingJob( display_name='train-{}'.format(model_display_name), script_path=\"model.py\", container_uri=train_image, requirements=[], # any extra Python packages model_serving_container_image_uri=deploy_image)"
},
{
"code": null,
"e": 7171,
"s": 7052,
"text": "(for why you want to assign a timestamped name to the model, please see How to Deploy a TensorFlow Model to Vertex AI)"
},
{
"code": null,
"e": 7191,
"s": 7171,
"text": "3. Run training job"
},
{
"code": null,
"e": 7274,
"s": 7191,
"text": "Running the job involves running model.py on the managed dataset on some hardware:"
},
{
"code": null,
"e": 7581,
"s": 7274,
"text": "model = job.run( dataset=data_set, model_display_name=model_display_name, args=['--bucket', BUCKET], replica_count=1, machine_type='n1-standard-4', accelerator_type=aip.AcceleratorType.NVIDIA_TESLA_T4.name, accelerator_count=1, sync=develop_mode )"
},
{
"code": null,
"e": 7598,
"s": 7581,
"text": "4. Find endpoint"
},
{
"code": null,
"e": 7794,
"s": 7598,
"text": "We want to deploy to a preexisting endpoint (read see How to Deploy a TensorFlow Model to Vertex AI for an explanation of what an endpoint is). So, find an existing endpoint or create one if not:"
},
{
"code": null,
"e": 8193,
"s": 7794,
"text": " endpoints = aiplatform.Endpoint.list( filter='display_name=\"{}\"'.format(ENDPOINT_NAME), order_by='create_time desc', project=PROJECT, location=REGION, ) if len(endpoints) > 0: endpoint = endpoints[0] # most recently created else: endpoint = aiplatform.Endpoint.create( display_name=ENDPOINT_NAME, project=PROJECT, location=REGION )"
},
{
"code": null,
"e": 8209,
"s": 8193,
"text": "5. Deploy model"
},
{
"code": null,
"e": 8252,
"s": 8209,
"text": "Finally, deploy the model to the endpoint:"
},
{
"code": null,
"e": 8422,
"s": 8252,
"text": "model.deploy( endpoint=endpoint, traffic_split={\"0\": 100}, machine_type='n1-standard-2', min_replica_count=1, max_replica_count=1 )"
},
{
"code": null,
"e": 8819,
"s": 8422,
"text": "Thatβs it! Now, you have a Python program that you can run anytime you want to retrain and/or deploy the trained model. Of course, the MLOps person will typically not replace the model wholesale, but send only a small fraction of the traffic to the model. Theyβll probably also set up monitoring and continuous evaluation on the endpoint in Vertex AI. But youβve made it easy for them to do that."
},
{
"code": null,
"e": 9014,
"s": 8819,
"text": "What changes in the above pipeline if I want to use AutoML instead of my custom training job? Well, I donβt need my own model.py of course. So, instead of the CustomTrainingJob, Iβll use AutoML."
},
{
"code": null,
"e": 9085,
"s": 9014,
"text": "Setting and running the training job (Steps 3 and 4 above) now become:"
},
{
"code": null,
"e": 9617,
"s": 9085,
"text": "def train_automl_model(data_set, timestamp): # train model_display_name = '{}-{}'.format(ENDPOINT_NAME, timestamp) job = aiplatform.AutoMLTabularTrainingJob( display_name='train-{}'.format(model_display_name), optimization_prediction_type='classification' ) model = job.run( dataset=data_set, target_column='ontime', model_display_name=model_display_name, budget_milli_node_hours=(300 if develop_mode else 2000), disable_early_stopping=False ) return job, model"
},
{
"code": null,
"e": 9767,
"s": 9617,
"text": "Thatβs the only change! The rest of the pipeline stays the same. Thatβs what we mean when we say that you have a unified platform for ML development."
},
{
"code": null,
"e": 10038,
"s": 9767,
"text": "In fact, you can similarly change the ML framework to PyTorch or to sklearn or XGBoost and, as far as the MLOps people are concerned, there are only minimal changes. In my train_on_vertexai.py, I switch between custom Keras code and AutoML with a command-line parameter."
},
{
"code": null,
"e": 10250,
"s": 10038,
"text": "By default, Vertex AI does a fractional split of the data (80% to training, 10% each for validation and testing). What if you want to control the split? There are several options available (based on time, etc.)."
},
{
"code": null,
"e": 10364,
"s": 10250,
"text": "Suppose you want to add a column to your dataset that controls the split, you can do this when creating the data:"
},
{
"code": null,
"e": 10673,
"s": 10364,
"text": "CREATE OR REPLACE TABLE dsongcp.flights_all_data ASSELECT IF(arr_delay < 15, 1.0, 0.0) AS ontime, dep_delay, taxi_out, ... IF (is_train_day = 'True', IF(ABS(MOD(FARM_FINGERPRINT(CAST(f.FL_DATE AS STRING)), 100)) < 60, 'TRAIN', 'VALIDATE'), 'TEST') AS data_splitFROM dsongcp.flights_tzcorr f..."
},
{
"code": null,
"e": 10856,
"s": 10673,
"text": "Basically, there is a column that Iβm calling data_split that takes the values TRAIN, VALIDATE or TEST. So, every row in the managed dataset is assigned to one of these three splits."
},
{
"code": null,
"e": 10978,
"s": 10856,
"text": "Then, when Iβm training the job (whether itβs custom model or automl), I specify what the predefined splitting column is:"
},
{
"code": null,
"e": 11194,
"s": 10978,
"text": "model = job.run( dataset=data_set, # See https://googleapis.dev/python/aiplatform/latest/aiplatform.html# predefined_split_column_name='data_split', model_display_name=model_display_name,"
},
{
"code": null,
"e": 11319,
"s": 11194,
"text": "Thatβs it! Vertex AI will take care of the rest, including assigning all the necessary metadata to the models being trained."
},
{
"code": null,
"e": 11492,
"s": 11319,
"text": "Bottom-line: MLOps is getting easier as more and more of it becomes automatically managed. Lean into this, by following a clean separation of responsibilities in your code."
},
{
"code": null,
"e": 11499,
"s": 11492,
"text": "Enjoy!"
},
{
"code": null,
"e": 12071,
"s": 11499,
"text": "Giving Vertex AI, the New Unified ML Platform on Google Cloud, a Spin:Why do we need it, how good is the code-free ML training, really, and what does all this mean for data science jobs?How to Deploy a TensorFlow Model to Vertex AI: Working with saved models and endpoints in Vertex AIDeveloping and Deploying a Machine Learning Model on Vertex AI using Python: Write training pipelines that will make your MLOps team happyHow to build an MLOps pipeline for hyperparameter tuning in Vertex AI:Best practices to set up your model and orchestrator for hyperparameter tuning"
},
{
"code": null,
"e": 12258,
"s": 12071,
"text": "Giving Vertex AI, the New Unified ML Platform on Google Cloud, a Spin:Why do we need it, how good is the code-free ML training, really, and what does all this mean for data science jobs?"
},
{
"code": null,
"e": 12358,
"s": 12258,
"text": "How to Deploy a TensorFlow Model to Vertex AI: Working with saved models and endpoints in Vertex AI"
},
{
"code": null,
"e": 12497,
"s": 12358,
"text": "Developing and Deploying a Machine Learning Model on Vertex AI using Python: Write training pipelines that will make your MLOps team happy"
}
] |
C library function - strspn()
|
The C library function size_t strspn(const char *str1, const char *str2) calculates the length of the initial segment of str1 which consists entirely of characters in str2.
Following is the declaration for strspn() function.
size_t strspn(const char *str1, const char *str2)
str1 − This is the main C string to be scanned.
str1 − This is the main C string to be scanned.
str2 − This is the string containing the list of characters to match in str1.
str2 − This is the string containing the list of characters to match in str1.
This function returns the number of characters in the initial segment of str1 which consist only of characters from str2.
The following example shows the usage of strspn() function.
#include <stdio.h>
#include <string.h>
int main () {
int len;
const char str1[] = "ABCDEFG019874";
const char str2[] = "ABCD";
len = strspn(str1, str2);
printf("Length of initial segment matching %d\n", len );
return(0);
}
Let us compile and run the above program that will produce the following result −
Length of initial segment matching 4
12 Lectures
2 hours
Nishant Malik
12 Lectures
2.5 hours
Nishant Malik
48 Lectures
6.5 hours
Asif Hussain
12 Lectures
2 hours
Richa Maheshwari
20 Lectures
3.5 hours
Vandana Annavaram
44 Lectures
1 hours
Amit Diwan
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2180,
"s": 2007,
"text": "The C library function size_t strspn(const char *str1, const char *str2) calculates the length of the initial segment of str1 which consists entirely of characters in str2."
},
{
"code": null,
"e": 2232,
"s": 2180,
"text": "Following is the declaration for strspn() function."
},
{
"code": null,
"e": 2282,
"s": 2232,
"text": "size_t strspn(const char *str1, const char *str2)"
},
{
"code": null,
"e": 2330,
"s": 2282,
"text": "str1 β This is the main C string to be scanned."
},
{
"code": null,
"e": 2378,
"s": 2330,
"text": "str1 β This is the main C string to be scanned."
},
{
"code": null,
"e": 2456,
"s": 2378,
"text": "str2 β This is the string containing the list of characters to match in str1."
},
{
"code": null,
"e": 2534,
"s": 2456,
"text": "str2 β This is the string containing the list of characters to match in str1."
},
{
"code": null,
"e": 2656,
"s": 2534,
"text": "This function returns the number of characters in the initial segment of str1 which consist only of characters from str2."
},
{
"code": null,
"e": 2716,
"s": 2656,
"text": "The following example shows the usage of strspn() function."
},
{
"code": null,
"e": 2964,
"s": 2716,
"text": "#include <stdio.h>\n#include <string.h>\n\nint main () {\n int len;\n const char str1[] = \"ABCDEFG019874\";\n const char str2[] = \"ABCD\";\n\n len = strspn(str1, str2);\n\n printf(\"Length of initial segment matching %d\\n\", len );\n \n return(0);\n}"
},
{
"code": null,
"e": 3046,
"s": 2964,
"text": "Let us compile and run the above program that will produce the following result β"
},
{
"code": null,
"e": 3084,
"s": 3046,
"text": "Length of initial segment matching 4\n"
},
{
"code": null,
"e": 3117,
"s": 3084,
"text": "\n 12 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 3132,
"s": 3117,
"text": " Nishant Malik"
},
{
"code": null,
"e": 3167,
"s": 3132,
"text": "\n 12 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 3182,
"s": 3167,
"text": " Nishant Malik"
},
{
"code": null,
"e": 3217,
"s": 3182,
"text": "\n 48 Lectures \n 6.5 hours \n"
},
{
"code": null,
"e": 3231,
"s": 3217,
"text": " Asif Hussain"
},
{
"code": null,
"e": 3264,
"s": 3231,
"text": "\n 12 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 3282,
"s": 3264,
"text": " Richa Maheshwari"
},
{
"code": null,
"e": 3317,
"s": 3282,
"text": "\n 20 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 3336,
"s": 3317,
"text": " Vandana Annavaram"
},
{
"code": null,
"e": 3369,
"s": 3336,
"text": "\n 44 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 3381,
"s": 3369,
"text": " Amit Diwan"
},
{
"code": null,
"e": 3388,
"s": 3381,
"text": " Print"
},
{
"code": null,
"e": 3399,
"s": 3388,
"text": " Add Notes"
}
] |
Are static methods inherited in Java?
|
The static keyword is used to create methods that will exist independently of any instances created for the class.
Static methods do not use any instance variables of any object of the class they are defined in. Static methods take all the data from parameters and compute something from those parameters, with no reference to variables.
We can inherit static methods in Java.
In this example, we create a class named Demo and declare a static method named display().
We created another class Sample, extended the Demo class and tried to access the display() method using the sub class object.
Live Demo
class Demo{
   public static void display(){}
}
public class Sample extends Demo {
public static void display(){
System.out.println("Hello this is a static method");
}
public static void main(String args[]) throws Exception{
new Sample().display();
}
}
Hello this is a static method
|
[
{
"code": null,
"e": 1177,
"s": 1062,
"text": "The static keyword is used to create methods that will exist independently of any instances created for the class."
},
{
"code": null,
"e": 1400,
"s": 1177,
"text": "Static methods do not use any instance variables of any object of the class they are defined in. Static methods take all the data from parameters and compute something from those parameters, with no reference to variables."
},
{
"code": null,
"e": 1439,
"s": 1400,
"text": "We can inherit static methods in Java."
},
{
"code": null,
"e": 1536,
"s": 1439,
"text": "In the example we are creating a class named Demo and, declared a static method named display()."
},
{
"code": null,
"e": 1662,
"s": 1536,
"text": "We created another class Sample, extended the Demo class and tried to access the display() method using the sub class object."
},
{
"code": null,
"e": 1673,
"s": 1662,
"text": " Live Demo"
},
{
"code": null,
"e": 1948,
"s": 1673,
"text": "class Dem{\n public static void display(){}\n}\npublic class Sample extends Dem {\n public static void display(){\n System.out.println(\"Hello this is a static method\");\n }\n public static void main(String args[]) throws Exception{\n new Sample().display();\n }\n}"
},
{
"code": null,
"e": 1979,
"s": 1948,
"text": "Hello this is a static method\n"
}
] |
Great Expectations: The Data Testing Tool β Is This the Answer to Our Data Quality Needs? | by Karen Bajador Valencia | Towards Data Science
|
Iβm a data engineer currently working in the banking industry. I play a big part in delivering an essential project that focuses on risk assessments to protect the institution and its customers from financial crimes. It is particularly crucial to produce periodical risk reports that are accurate, complete, consistent, time-relevant and standardised to perform correct analysis and mitigate inherent risks. Being unable to do so will result in serious consequences not only to the bank but also to its stakeholders and individual clients. Incorporating a robust data quality process in data pipelines is a must and choosing an efficient tool to utilise for this purpose is definitely going to be a game-changer.
Great Expectations is a tool that helps teams eliminate pipeline debt through data testing, documentation and profiling. Iβve been meaning to explore and evaluate this tool to determine if it is feasible to integrate with our existing environment and whether it could potentially ameliorate apparent constraints in our current process and prevent major refactoring efforts when we migrate data platforms in the future.
To deliver downstream metrics for analysis, we have to process hundreds of millions of data from a variety of source systems that goes through different layers of transformation. We leverage storage and compute suitable for Big data such as AWS S3 and AWS EMR, use Jupyter and Pyspark as ETL tools, and load the final metrics into AWS Redshift.
The pipeline does five main things: prepare the dependencies, determine the order of execution, execute the notebooks, validate test results, and lastly, load the final metrics into the Data warehouse.
Each notebook creates a single data entity. To run some quality checks, aggregated data is queried and loaded into a DataFrame and unit tests are executed against it. The results of the tests are logged into S3 and loaded during the test validation phase to assess coverage.
This process currently works but there are a few things that I think could be done better.
Unit-test code exists in each notebook. But most notebooks are pretty much running exactly the same unit tests. This results in duplication of code.
It would be nice to have a user-friendly static website shared across the team that shows what exactly is going on with our data in any environment. In our current setup, all we get is a long list of test results outputted in the console when the pipeline is executed.
We are anticipating a big move from Redshift and Spark environment to Snowflake soon which means weβll have to ditch our notebooks that are mainly written in Pyspark and where unit tests currently reside, and possibly switch to DBT to do our data transformations. Weβll have to find a way to run the tests against a new data source.
Sure we can always refactor the code to generate a reusable test module that is decoupled and implements dependency injection so that it doesnβt matter if the source of data changes. Documentation, on the other hand, is not trivial to implement. It would be lovely if there is such a tool that will help overcome the current limitations and offers way more beneficial features.
Unit tests or expectations translated directly into documentationAdd extra information about the unit testDocumentation can be hosted in S3Ready-to-use common data quality test functionsSupport most of the data quality checks we already doSupport for multiple Pandas/Spark DataFrame and Snowflake Data sourceExtensibleIntegration with orchestration tools like Airflow or DBT
Unit tests or expectations translated directly into documentation
Add extra information about the unit test
Documentation can be hosted in S3
Ready-to-use common data quality test functions
Support most of the data quality checks we already do
Support for multiple Pandas/Spark DataFrame and Snowflake Data source
Extensible
Integration with orchestration tools like Airflow or DBT
We know how powerful and convenient documentation is but nobody wants to maintain separate documentation just to ensure it is synced with the latest code. I prefer an out-of-the-box feature that generates documentation out of unit tests defined.
Great expectations: Supported; Score: 1/8
I want to be able to elaborate on what is exactly being tested instead of generic descriptions to make it comprehensible enough for anyone who checks the data quality reports.
According to the style guide, it is recommended to use google style docstrings.
Great expectations: Supported; Score: 2/8
AWS S3 is a convenient location to host static websites and since we already use S3, might as well use the same to deploy the shared documentation.
By default, docs are stored locally but can also be deployed in a cloud blob storage including S3.
Great expectations: Supported; Score: 3/8
It would greatly expedite the refactoring process if there are already native unit test functions that we can leverage to avoid having to write dynamic tests from scratch.
Great expectations: Supported; Score: 4/8
Iβve searched for available expectations that potentially match our most common data quality checks at least semantically.
Great expectations: Mostly supported; Score: 5/8
It should work for both our current and future environment with minimal effort to refactor.
Data sources supported: Pandas DataFrame, Spark DataFrame, and Snowflake via SQLAlchemy
Great expectations: Supported; Score: 6/8
Even though it supports most of our data quality expectations out of the box, it should be flexible enough to be able to write custom tests for those that it doesnβt natively support.
Great expectations: Supported; Score: 7/8
It should support integration with the existing orchestration platforms in our data ecosystem.
Validations can be run via Airflow through BashOperator or PythonOperator. There is also an extension package for DBT as well.
Great expectations: Supported; Score: 8/8
Looks like Great Expectations looks good on paper but will it actually do the work? Letβs find out! Follow me in my journey as I try out the tool and explore its capabilities. Watch out for the next sequel in this blog series.
Data quality unit tests in Pyspark using Great Expectations
|
[
{
"code": null,
"e": 884,
"s": 171,
"text": "Iβm a data engineer currently working in the banking industry. I play a big part in delivering an essential project that focuses on risk assessments to protect the institution and its customers from financial crimes. It is particularly crucial to produce periodical risk reports that are accurate, complete, consistent, time-relevant and standardised to perform correct analysis and mitigate inherent risks. Being unable to do so will result in serious consequences not only to the bank but also to its stakeholders and individual clients. Incorporating a robust data quality process in data pipelines is a must and choosing an efficient tool to utilise for this purpose is definitely going to be a game-changer."
},
{
"code": null,
"e": 1303,
"s": 884,
"text": "Great Expectations is a tool that helps teams eliminate pipeline debt through data testing, documentation and profiling. Iβve been meaning to explore and evaluate this tool to determine if it is feasible to integrate with our existing environment and whether it could potentially ameliorate apparent constraints in our current process and prevent major refactoring efforts when we migrate data platforms in the future."
},
{
"code": null,
"e": 1648,
"s": 1303,
"text": "To deliver downstream metrics for analysis, we have to process hundreds of millions of data from a variety of source systems that goes through different layers of transformation. We leverage storage and compute suitable for Big data such as AWS S3 and AWS EMR, use Jupyter and Pyspark as ETL tools, and load the final metrics into AWS Redshift."
},
{
"code": null,
"e": 1850,
"s": 1648,
"text": "The pipeline does five main things: prepare the dependencies, determine the order of execution, execute the notebooks, validate test results, and lastly, load the final metrics into the Data warehouse."
},
{
"code": null,
"e": 2125,
"s": 1850,
"text": "Each notebook creates a single data entity. To run some quality checks, aggregated data is queried and loaded into a DataFrame and unit tests are executed against it. The results of the tests are logged into S3 and loaded during the test validation phase to assess coverage."
},
{
"code": null,
"e": 2216,
"s": 2125,
"text": "This process currently works but there are a few things that I think could be done better."
},
{
"code": null,
"e": 2365,
"s": 2216,
"text": "Unit-test code exists in each notebook. But most notebooks are pretty much running exactly the same unit tests. This results in duplication of code."
},
{
"code": null,
"e": 2634,
"s": 2365,
"text": "It would be nice to have a user-friendly static website shared across the team that shows what exactly is going on with our data in any environment. In our current setup, all we get is a long list of test results outputted in the console when the pipeline is executed."
},
{
"code": null,
"e": 2967,
"s": 2634,
"text": "We are anticipating a big move from Redshift and Spark environment to Snowflake soon which means weβll have to ditch our notebooks that are mainly written in Pyspark and where unit tests currently reside, and possibly switch to DBT to do our data transformations. Weβll have to find a way to run the tests against a new data source."
},
{
"code": null,
"e": 3345,
"s": 2967,
"text": "Sure we can always refactor the code to generate a reusable test module that is decoupled and implements dependency injection so that it doesnβt matter if the source of data changes. Documentation, on the other hand, is not trivial to implement. It would be lovely if there is such a tool that will help overcome the current limitations and offers way more beneficial features."
},
{
"code": null,
"e": 3720,
"s": 3345,
"text": "Unit tests or expectations translated directly into documentationAdd extra information about the unit testDocumentation can be hosted in S3Ready-to-use common data quality test functionsSupport most of the data quality checks we already doSupport for multiple Pandas/Spark DataFrame and Snowflake Data sourceExtensibleIntegration with orchestration tools like Airflow or DBT"
},
{
"code": null,
"e": 3786,
"s": 3720,
"text": "Unit tests or expectations translated directly into documentation"
},
{
"code": null,
"e": 3828,
"s": 3786,
"text": "Add extra information about the unit test"
},
{
"code": null,
"e": 3862,
"s": 3828,
"text": "Documentation can be hosted in S3"
},
{
"code": null,
"e": 3910,
"s": 3862,
"text": "Ready-to-use common data quality test functions"
},
{
"code": null,
"e": 3964,
"s": 3910,
"text": "Support most of the data quality checks we already do"
},
{
"code": null,
"e": 4034,
"s": 3964,
"text": "Support for multiple Pandas/Spark DataFrame and Snowflake Data source"
},
{
"code": null,
"e": 4045,
"s": 4034,
"text": "Extensible"
},
{
"code": null,
"e": 4102,
"s": 4045,
"text": "Integration with orchestration tools like Airflow or DBT"
},
{
"code": null,
"e": 4348,
"s": 4102,
"text": "We know how powerful and convenient documentation is but nobody wants to maintain separate documentation just to ensure it is synced with the latest code. I prefer an out-of-the-box feature that generates documentation out of unit tests defined."
},
{
"code": null,
"e": 4390,
"s": 4348,
"text": "Great expectations: Supported; Score: 1/8"
},
{
"code": null,
"e": 4566,
"s": 4390,
"text": "I want to be able to elaborate on what is exactly being tested instead of generic descriptions to make it comprehensible enough for anyone who checks the data quality reports."
},
{
"code": null,
"e": 4646,
"s": 4566,
"text": "According to the style guide, it is recommended to use google style docstrings."
},
{
"code": null,
"e": 4688,
"s": 4646,
"text": "Great expectations: Supported; Score: 2/8"
},
{
"code": null,
"e": 4836,
"s": 4688,
"text": "AWS S3 is a convenient location to host static websites and since we already use S3, might as well use the same to deploy the shared documentation."
},
{
"code": null,
"e": 4935,
"s": 4836,
"text": "By default, docs are stored locally but can also be deployed in a cloud blob storage including S3."
},
{
"code": null,
"e": 4977,
"s": 4935,
"text": "Great expectations: Supported; Score: 3/8"
},
{
"code": null,
"e": 5149,
"s": 4977,
"text": "It would greatly expedite the refactoring process if there are already native unit test functions that we can leverage to avoid having to write dynamic tests from scratch."
},
{
"code": null,
"e": 5191,
"s": 5149,
"text": "Great expectations: Supported; Score: 4/8"
},
{
"code": null,
"e": 5314,
"s": 5191,
"text": "Iβve searched for available expectations that potentially match our most common data quality checks at least semantically."
},
{
"code": null,
"e": 5363,
"s": 5314,
"text": "Great expectations: Mostly supported; Score: 5/8"
},
{
"code": null,
"e": 5455,
"s": 5363,
"text": "It should work for both our current and future environment with minimal effort to refactor."
},
{
"code": null,
"e": 5543,
"s": 5455,
"text": "Data sources supported: Pandas DataFrame, Spark DataFrame, and Snowflake via SQLAlchemy"
},
{
"code": null,
"e": 5585,
"s": 5543,
"text": "Great expectations: Supported; Score: 6/8"
},
{
"code": null,
"e": 5769,
"s": 5585,
"text": "Even though it supports most of our data quality expectations out of the box, it should be flexible enough to be able to write custom tests for those that it doesnβt natively support."
},
{
"code": null,
"e": 5811,
"s": 5769,
"text": "Great expectations: Supported; Score: 7/8"
},
{
"code": null,
"e": 5906,
"s": 5811,
"text": "It should support integration with the existing orchestration platforms in our data ecosystem."
},
{
"code": null,
"e": 6033,
"s": 5906,
"text": "Validations can be run via Airflow through BashOperator or PythonOperator. There is also an extension package for DBT as well."
},
{
"code": null,
"e": 6075,
"s": 6033,
"text": "Great expectations: Supported; Score: 8/8"
},
{
"code": null,
"e": 6302,
"s": 6075,
"text": "Looks like Great Expectations looks good on paper but will it actually do the work? Letβs find out! Follow me in my journey as I try out the tool and explore its capabilities. Watch out for the next sequel in this blog series."
}
] |
Given an array arr[], find the maximum j - i such that arr[j] > arr[i] - GeeksforGeeks
|
16 Mar, 2022
Given an array arr[], find the maximum j β i such that arr[j] > arr[i].
Examples :
Input: {34, 8, 10, 3, 2, 80, 30, 33, 1}
Output: 6 (j = 7, i = 1)
Input: {9, 2, 3, 4, 5, 6, 7, 8, 18, 0}
Output: 8 ( j = 8, i = 0)
Input: {1, 2, 3, 4, 5, 6}
Output: 5 (j = 5, i = 0)
Input: {6, 5, 4, 3, 2, 1}
Output: -1
Method 1 (Simple but Inefficient) Run two loops. In the outer loop, pick elements one by one from the left. In the inner loop, compare the picked element with the elements starting from the right side. Stop the inner loop when you see an element greater than the picked element and keep updating the maximum j-i so far.
C++
C
Java
Python3
C#
PHP
Javascript
// CPP program for the above approach#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff;} int main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << "\n" << maxDiff; return 0;} // This code is contributed// by Akanksha Rai(Abby_akku)
// C program for the above approach#include <stdio.h>/* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff;} int main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); printf("\n %d", maxDiff); getchar(); return 0;}
// Java program for the above approachclass FindMaximum { /* For a given array arr[], returns the maximum j-i such that arr[j] > arr[i] */ int maxIndexDiff(int arr[], int n) { int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff; } /* Driver program to test above functions */ public static void main(String[] args) { FindMaximum max = new FindMaximum(); int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = arr.length; int maxDiff = max.maxIndexDiff(arr, n); System.out.println(maxDiff); }}
# Python3 program to find the maximum# j β i such that arr[j] > arr[i] # For a given array arr[], returns# the maximum j β i such that# arr[j] > arr[i] def maxIndexDiff(arr, n): maxDiff = -1 for i in range(0, n): j = n - 1 while(j > i): if arr[j] > arr[i] and maxDiff < (j - i): maxDiff = j - i j -= 1 return maxDiff # driver codearr = [9, 2, 3, 4, 5, 6, 7, 8, 18, 0]n = len(arr)maxDiff = maxIndexDiff(arr, n)print(maxDiff) # This article is contributed by Smitha Dinesh Semwal
// C# program to find the maximum// j β i such that arr[j] > arr[i]using System;class GFG { // For a given array arr[], returns // the maximum j-i such that arr[j] > arr[i] static int maxIndexDiff(int[] arr, int n) { int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff; } // Driver program public static void Main() { int[] arr = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = arr.Length; int maxDiff = maxIndexDiff(arr, n); Console.Write(maxDiff); }}// This Code is Contributed by Sam007
<?php// PHP program to find the maximum// j β i such that arr[j] > arr[i] // For a given array arr[], returns// the maximum j β i such that// arr[j] > arr[i]function maxIndexDiff($arr, $n){ $maxDiff = -1; for ($i = 0; $i < $n; ++$i) { for ($j = $n - 1; $j > $i; --$j) { if($arr[$j] > $arr[$i] && $maxDiff < ($j - $i)) $maxDiff = $j - $i; } } return $maxDiff;} // Driver Code$arr = array(9, 2, 3, 4, 5, 6, 7, 8, 18, 0);$n = count($arr);$maxDiff = maxIndexDiff($arr, $n);echo $maxDiff ; // This code is contributed by Sam007?>
<script>// JavaScript program for the above approach /* For a given array arr[],returns the maximum j β i suchthat arr[j] > arr[i] */function maxIndexDiff(arr, n){ let maxDiff = -1; let i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff;} // Driver code let arr = [ 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 ]; let n = arr.length; let maxDiff = maxIndexDiff(arr, n); document.write(maxDiff); // This code is contributed by Manoj.</script>
8
Time Complexity: O(n^2)
Method 2 β Improvising the Brute Force Algorithm and looking for BUD, i.e Bottlenecks, unnecessary and duplicated works. A quick observation actually shows that we have been looking to find the first greatest element traversing from the end of the array to the current index. We can see that we are trying to find the first greatest element again and again for each element in the array. Letβs say we have an array with us for example [1, 5, 12, 4, 9] now we know that 9 is the element that is greater than 1, 5, and 4 but why do we need to find that again and again. We can actually keep a track of the maximum number moving from the end to the start of the array. The approach will help us understand better and also this improvisation is great to come up with in an interview.
Approach :
Traverse the array from the end and keep a track of the maximum number to the right of the current index including selfNow we have a monotonous decreasing array, and we know we can use binary search to find the index of the rightmost greater elementNow we will just use binary search for each of the elements in the array and store the maximum difference of the indices and thatβs it we are done.
Traverse the array from the end and keep a track of the maximum number to the right of the current index including self
Now we have a monotonous decreasing array, and we know we can use binary search to find the index of the rightmost greater element
Now we will just use binary search for each of the elements in the array and store the maximum difference of the indices and thatβs it we are done.
C++
C
Java
Python3
C#
Javascript
/* For a given array arr[], calculates the maximum j β i such that arr[j] > arr[i] */#include <bits/stdc++.h>using namespace std; int main(){ vector<long long int> v{ 34, 8, 10, 3, 2, 80, 30, 33, 1 }; int n = v.size(); vector<long long int> maxFromEnd(n + 1, INT_MIN); // create an array maxfromEnd for (int i = v.size() - 1; i >= 0; i--) { maxFromEnd[i] = max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < v.size(); i++) { int low = i + 1, high = v.size() - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current answer and look // for further larger number to the right // side ans = max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // keeping a track of the // maximum difference in indices result = max(result, ans - i); } cout << result << endl;}
/* C program to implementthe above approach */ /* For a given array arr[],calculates the maximum j β isuch that arr[j] > arr[i] */#include <limits.h>#include <stdio.h> /* Function for maximum oftwo numbers in C */int max(int num1, int num2){ return (num1 > num2 ) ? num1 : num2;} int main(){ int v[] = { 34, 8, 10, 3, 2, 80, 30, 33, 1 }; int n = sizeof(v) / sizeof(v[0]); int maxFromEnd[n+1]; for (int i = 0; i < n+1; i++) { maxFromEnd[i] = INT_MIN; } // create an array maxfromEnd for (int i = n - 1; i >= 0; i--) { maxFromEnd[i] = max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < n; i++) { int low = i + 1, high = n - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current answer and look // for further larger number to the right // side ans = max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // keeping a track of the // maximum difference in indices result = max(result, ans - i); } printf("\n %d", result);} /* This code is contributed by Pushpesh Raj */
// Java program to implement// the above approach // For a given array arr[],// calculates the maximum j β i// such that arr[j] > arr[i]import java.util.*;class GFG{ public static void main(String[] args){ int []v = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = v.length; int []maxFromEnd = new int[n + 1]; Arrays.fill(maxFromEnd, Integer.MIN_VALUE); // Create an array maxfromEnd for (int i = v.length - 1; i >= 0; i--) { maxFromEnd[i] = Math.max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < v.length; i++) { int low = i + 1, high = v.length - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current // answer and look for further // larger number to the right side ans = Math.max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // Keeping a track of the // maximum difference in indices result = Math.max(result, ans - i); } System.out.print(result + "\n");}} // This code is contributed by shikhasingrajput
# Python3 program to implement# the above approach # For a given array arr,# calculates the maximum j β i# such that arr[j] > arr[i] # Driver codeif __name__ == '__main__': v = [34, 8, 10, 3, 2, 80, 30, 33, 1]; n = len(v); maxFromEnd = [-38749432] * (n + 1); # Create an array maxfromEnd for i in range(n - 1, 0, -1): maxFromEnd[i] = max(maxFromEnd[i + 1], v[i]); result = 0; for i in range(0, n): low = i + 1; high = n - 1; ans = i; while (low <= high): mid = int((low + high) / 2); if (v[i] <= maxFromEnd[mid]): # We store this as current # answer and look for further # larger number to the right side ans = max(ans, mid); low = mid + 1; else: high = mid - 1; # Keeping a track of the # maximum difference in indices result = max(result, ans - i); print(result, end = ""); # This code is contributed by Rajput-Ji
// C# program to implement// the above approach // For a given array []arr,// calculates the maximum j β i// such that arr[j] > arr[i]using System;class GFG{ public static void Main(String[] args){ int []v = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = v.Length; int []maxFromEnd = new int[n + 1]; for (int i = 0; i < maxFromEnd.Length; i++) maxFromEnd[i] = int.MinValue; // Create an array maxfromEnd for (int i = v.Length - 1; i >= 0; i--) { maxFromEnd[i] = Math.Max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < v.Length; i++) { int low = i + 1, high = v.Length - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current // answer and look for further // larger number to the right side ans = Math.Max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // Keeping a track of the // maximum difference in indices result = Math.Max(result, ans - i); } Console.Write(result + "\n");}} // This code is contributed by shikhasingrajput
<script> // Javascript program to implement // the above approach // For a given array []arr, // calculates the maximum j β i // such that arr[j] > arr[i] let v = [34, 8, 10, 3, 2, 80, 30, 33, 1]; let n = v.length; let maxFromEnd = new Array(n + 1); for (let i = 0; i < maxFromEnd.length; i++) maxFromEnd[i] = Number.MIN_VALUE; // Create an array maxfromEnd for (let i = v.length - 1; i >= 0; i--) { maxFromEnd[i] = Math.max(maxFromEnd[i + 1], v[i]); } let result = 0; for (let i = 0; i < v.length; i++) { let low = i + 1, high = v.length - 1, ans = i; while (low <= high) { let mid = parseInt((low + high) / 2, 10); if (v[i] <= maxFromEnd[mid]) { // We store this as current // answer and look for further // larger number to the right side ans = Math.max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // Keeping a track of the // maximum difference in indices result = Math.max(result, ans - i); } document.write(result); </script>
6
Time complexity : O(N*log(N)) Space complexity: O(N)
Method 3 O(nLgn):Use hashing and sorting to solve this problem in less than quadratic complexity after taking special care of the duplicates. Approach :
Traverse the array and store the index of each element in a list (to handle duplicates).Sort the array.Now traverse the array and keep track of the maximum difference of i and j.For j consider the last index from the list of possible index of the element and for i consider the first index from the list. (As the index were appended in ascending order).Keep updating the max difference till the end of the array.
Traverse the array and store the index of each element in a list (to handle duplicates).
Sort the array.
Now traverse the array and keep track of the maximum difference of i and j.
For j consider the last index from the list of possible index of the element and for i consider the first index from the list. (As the index were appended in ascending order).
Keep updating the max difference till the end of the array.
C++
Java
Python3
C#
Javascript
// C++ implementation of// the hashmap approach#include <bits/stdc++.h>using namespace std; // Function to find maximum// index differenceint maxIndexDiff(vector<int>& arr, int n){ // Initialise unordered_map unordered_map<int, vector<int> > hashmap; // Iterate from 0 to n - 1 for (int i = 0; i < n; i++) { hashmap[arr[i]].push_back(i); } // Sort arr sort(arr.begin(), arr.end()); int maxDiff = INT_MIN; int temp = n; // Iterate from 0 to n - 1 for (int i = 0; i < n; i++) { if (temp > hashmap[arr[i]][0]) { temp = hashmap[arr[i]][0]; } maxDiff = max( maxDiff, hashmap[arr[i]][hashmap[arr[i]].size() - 1] - temp); } return maxDiff;} // Driver Codeint main(){ int n = 9; vector<int> arr{ 34, 8, 10, 3, 2, 80, 30, 33, 1 }; // Function Call int ans = maxIndexDiff(arr, n); cout << "The maxIndexDiff is : " << ans << endl; return 1;}
// Java implementation of// the hashmap approachimport java.io.*;import java.util.*; class GFG{ // Function to find maximum// index differencestatic int maxIndexDiff(ArrayList<Integer> arr, int n){ // Initialise unordered_map Map<Integer, ArrayList<Integer>> hashmap = new HashMap<Integer, ArrayList<Integer>>(); // Iterate from 0 to n - 1 for(int i = 0; i < n; i++) { if(hashmap.containsKey(arr.get(i))) { hashmap.get(arr.get(i)).add(i); } else { hashmap.put(arr.get(i), new ArrayList<Integer>()); hashmap.get(arr.get(i)).add(i); } } // Sort arr Collections.sort(arr); int maxDiff = Integer.MIN_VALUE; int temp = n; // Iterate from 0 to n - 1 for(int i = 0; i < n; i++) { if (temp > hashmap.get(arr.get(i)).get(0)) { temp = hashmap.get(arr.get(i)).get(0); } maxDiff = Math.max(maxDiff, hashmap.get(arr.get(i)).get( hashmap.get(arr.get(i)).size() - 1) - temp); } return maxDiff;} // Driver Codepublic static void main(String[] args){ int n = 9; ArrayList<Integer> arr = new ArrayList<Integer>( Arrays.asList(34, 8, 10, 3, 2, 80, 30, 33, 1)); // Function Call int ans = maxIndexDiff(arr, n); System.out.println("The maxIndexDiff is : " + ans);}} // This code is contributed by avanitrachhadiya2155
# Python3 implementation of the above approachn = 9a = [34, 8, 10, 3, 2, 80, 30, 33, 1] # To store the index of an element.index = dict()for i in range(n): if a[i] in index: # append to list (for duplicates) index[a[i]].append(i) else: # if first occurrence index[a[i]] = [i] # sort the input arraya.sort() maxDiff = 0 # Temporary variable to keep track of minimum itemp = n for i in range(n): if temp > index[a[i]][0]: temp = index[a[i]][0] maxDiff = max(maxDiff, index[a[i]][-1]-temp) print(maxDiff)
// C# implementation of the hashmap approach

using System;
using System.Collections.Generic;

public class GFG {

    // Returns the maximum j - i such that arr[j] >= arr[i].
    // Note: sorts the list in place.
    static int maxIndexDiff(List<int> arr, int n)
    {
        // value -> ascending list of indices where it occurs
        var positions = new Dictionary<int, List<int>>();
        for (int idx = 0; idx < n; idx++) {
            List<int> occ;
            if (!positions.TryGetValue(arr[idx], out occ)) {
                occ = new List<int>();
                positions[arr[idx]] = occ;
            }
            occ.Add(idx);
        }

        // Visit the values in increasing order.
        arr.Sort();

        int best = -1;
        int minIndex = n; // smallest first-occurrence index seen so far

        for (int idx = 0; idx < n; idx++) {
            var occ = positions[arr[idx]];
            minIndex = Math.Min(minIndex, occ[0]);
            // Last occurrence of the current value minus the smallest
            // index of any value <= it.
            best = Math.Max(best, occ[occ.Count - 1] - minIndex);
        }
        return best;
    }

    // Driver Code
    static public void Main()
    {
        int n = 9;
        var arr = new List<int> { 34, 8, 10, 3, 2, 80, 30, 33, 1 };

        // Function Call
        int ans = maxIndexDiff(arr, n);
        Console.WriteLine("The maxIndexDiff is : " + ans);
    }
}
// JavaScript implementation of the hashmap approach

/**
 * Returns the maximum j - i such that arr[j] >= arr[i].
 * Note: sorts arr in place.
 * @param {number[]} arr - input values
 * @param {number} n - number of elements to consider
 * @returns {number}
 */
function maxIndexDiff(arr, n)
{
    // Fixed: the original created a Map but then accessed it with
    // bracket notation, which stores plain expando properties and
    // bypasses the Map API entirely. Use Map.get/set/has properly.
    const positions = new Map();
    for (let i = 0; i < n; i++) {
        if (!positions.has(arr[i])) {
            positions.set(arr[i], []);
        }
        positions.get(arr[i]).push(i);
    }

    // Visit the values in increasing order.
    arr.sort((a, b) => a - b);

    let maxDiff = 0;
    let minIndex = n; // smallest first-occurrence index seen so far
    for (let i = 0; i < n; i++) {
        const occ = positions.get(arr[i]);
        if (minIndex > occ[0]) {
            minIndex = occ[0];
        }
        // Last occurrence of the current value paired with the smallest
        // index of any value <= it.
        maxDiff = Math.max(maxDiff, occ[occ.length - 1] - minIndex);
    }
    return maxDiff;
}

// Driver Code
const input = [ 34, 8, 10, 3, 2, 80, 30, 33, 1 ];
const answer = maxIndexDiff(input, input.length);

// Guarded so the snippet also runs outside a browser (e.g. Node).
if (typeof document !== 'undefined') {
    document.write(`The maxIndexDiff is : ${answer}`);
} else {
    console.log(`The maxIndexDiff is : ${answer}`);
}
The maxIndexDiff is : 6
Time complexity : O(N*log(N))
Method 4 (Efficient) To solve this problem, we need to get two optimum indexes of arr[]: left index i and right index j. For an element arr[i], we do not need to consider arr[i] for the left index if there is an element smaller than arr[i] on the left side of arr[i]. Similarly, if there is a greater element on the right side of arr[j], then we do not need to consider this j for the right index. So we construct two auxiliary arrays LMin[] and RMax[] such that LMin[i] holds the smallest element on the left side of arr[i] including arr[i], and RMax[j] holds the greatest element on the right side of arr[j] including arr[j]. After constructing these two auxiliary arrays, we traverse both of them from left to right. While traversing LMin[] and RMax[], if we see that LMin[i] is greater than RMax[j], then we must move ahead in LMin[] (or do i++) because all elements on the left of LMin[i] are greater than or equal to LMin[i]. Otherwise we must move ahead in RMax[j] to look for a greater j − i value.
Thanks to celicom for suggesting the algorithm for this method.
Working Example:
Lets consider any example [7 3 1 8 9 10 4 5 6]
what is maxRight ?
Filling from right side 6 is first element now 6 > 5 so again we fill 6 till we reach 10 > 6 :
[10 10 10 10 10 10 6 6 6] this is maxR
[7 3 1 1 1 1 1 1 1 ] this is minL
Now let us see how to reach the answer from these two arrays, and a proof of why it works.
lets compare first elements of the arrays now we see 10 > 7,
now we increase maxR by 1 till it becomes lesser than 7 i.e at index 5
hence answer till now is. 5-0 = 5
now we will increase minL we get 3 which is lesser than 6 so we increase maxR till it reaches last index and the answer becomes 8-1= 7
so we see how we are getting correct answer.
As we need the max difference j − i such that A[i] <= A[j], we do not need to consider elements after index j or elements before index i.
in previous hint, make 2 arrays,
First, will store smallest occurring element before the element
Second, will store largest occurring element after the element
Traverse the Second array, till the element in second array is larger than or equal to First array, and store the index difference. And if it becomes smaller, traverse the first array till it again becomes larger.
And store the max difference of this index difference.
C++
C
Java
Python3
C#
PHP
Javascript
#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff; int i, j; int* LMin = new int[(sizeof(int) * n)]; int* RMax = new int[(sizeof(int) * n)]; /* Construct LMin[] such that LMin[i] stores the minimum value from (arr[0], arr[1], ... arr[i]) */ LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); /* Construct RMax[] such that RMax[j] stores the maximum value from (arr[j], arr[j+1], ..arr[n-1]) */ RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); /* Traverse both arrays from left to right to find optimum j - i. This process is similar to merge() of MergeSort */ i = 0, j = 0, maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff;} // Driver Codeint main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << maxDiff; return 0;} // This code is contributed by rathbhupendra
#include <stdio.h> /* Utility Functions to get max and minimum of two integers */int max(int x, int y){ return x > y ? x : y;} int min(int x, int y){ return x < y ? x : y;} /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff; int i, j; int* LMin = (int*)malloc(sizeof(int) * n); int* RMax = (int*)malloc(sizeof(int) * n); /* Construct LMin[] such that LMin[i] stores the minimum value from (arr[0], arr[1], ... arr[i]) */ LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); /* Construct RMax[] such that RMax[j] stores the maximum value from (arr[j], arr[j+1], ..arr[n-1]) */ RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); /* Traverse both arrays from left to right to find optimum j - i This process is similar to merge() of MergeSort */ i = 0, j = 0, maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff;} /* Driver program to test above functions */int main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); printf("\n %d", maxDiff); getchar(); return 0;}
class FindMaximum {
    /* Utility: larger of two ints */
    int max(int x, int y)
    {
        if (x > y)
            return x;
        return y;
    }

    /* Utility: smaller of two ints */
    int min(int x, int y)
    {
        if (x < y)
            return x;
        return y;
    }

    /* For a given array arr[], returns the maximum j - i
       such that arr[j] >= arr[i]. */
    int maxIndexDiff(int arr[], int n)
    {
        int[] prefixMin = new int[n];
        int[] suffixMax = new int[n];

        // prefixMin[k] = minimum of arr[0..k]
        prefixMin[0] = arr[0];
        for (int k = 1; k < n; k++)
            prefixMin[k] = min(arr[k], prefixMin[k - 1]);

        // suffixMax[k] = maximum of arr[k..n-1]
        suffixMax[n - 1] = arr[n - 1];
        for (int k = n - 2; k >= 0; k--)
            suffixMax[k] = max(arr[k], suffixMax[k + 1]);

        // Two-pointer sweep, similar to the merge step of merge sort.
        int i = 0, j = 0, best = -1;
        while (i < n && j < n) {
            if (prefixMin[i] <= suffixMax[j]) {
                best = max(best, j - i);
                j++;
            } else {
                i++;
            }
        }
        return best;
    }

    /* Driver program to test the above functions */
    public static void main(String[] args)
    {
        FindMaximum solver = new FindMaximum();
        int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 };
        System.out.println(solver.maxIndexDiff(arr, arr.length));
    }
}
# For a given array arr[], returns the maximum j - i
# such that arr[j] >= arr[i].
#
# Fixed: the original defined module-level max()/min() helpers that
# shadowed the Python built-ins for the rest of the module; the
# built-ins are used directly here instead.
def maxIndexDiff(arr, n):
    LMin = [0] * n
    RMax = [0] * n

    # LMin[i] = minimum of arr[0..i]
    LMin[0] = arr[0]
    for i in range(1, n):
        LMin[i] = min(arr[i], LMin[i - 1])

    # RMax[j] = maximum of arr[j..n-1]
    RMax[n - 1] = arr[n - 1]
    for j in range(n - 2, -1, -1):
        RMax[j] = max(arr[j], RMax[j + 1])

    # Two-pointer sweep, similar to merge() of MergeSort.
    i, j = 0, 0
    maxDiff = -1
    while j < n and i < n:
        if LMin[i] <= RMax[j]:
            maxDiff = max(maxDiff, j - i)
            j += 1
        else:
            i += 1
    return maxDiff


# Driver Code
if __name__ == '__main__':
    arr = [9, 2, 3, 4, 5, 6, 7, 8, 18, 0]
    n = len(arr)
    maxDiff = maxIndexDiff(arr, n)
    print(maxDiff)
// C# program to find the maximum
// j - i such that arr[j] > arr[i]
using System;

class GFG {
    // Utility: larger of two ints
    static int max(int x, int y)
    {
        if (x > y)
            return x;
        return y;
    }

    // Utility: smaller of two ints
    static int min(int x, int y)
    {
        if (x < y)
            return x;
        return y;
    }

    // For a given array arr[], returns the maximum j - i
    // such that arr[j] >= arr[i].
    static int maxIndexDiff(int[] arr, int n)
    {
        int[] prefixMin = new int[n];
        int[] suffixMax = new int[n];

        // prefixMin[k] = minimum of arr[0..k]
        prefixMin[0] = arr[0];
        for (int k = 1; k < n; ++k)
            prefixMin[k] = min(arr[k], prefixMin[k - 1]);

        // suffixMax[k] = maximum of arr[k..n-1]
        suffixMax[n - 1] = arr[n - 1];
        for (int k = n - 2; k >= 0; --k)
            suffixMax[k] = max(arr[k], suffixMax[k + 1]);

        // Two-pointer sweep, similar to the merge step of merge sort.
        int i = 0, j = 0, best = -1;
        while (j < n && i < n) {
            if (prefixMin[i] <= suffixMax[j]) {
                best = max(best, j - i);
                j++;
            } else {
                i++;
            }
        }
        return best;
    }

    // Driver program
    public static void Main()
    {
        int[] arr = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 };
        Console.Write(maxIndexDiff(arr, arr.Length));
    }
}
<?php
// PHP program to find the maximum
// j - i such that arr[j] > arr[i]

// For a given array arr[], returns the maximum j - i
// such that arr[j] >= arr[i].
function maxIndexDiff($arr, $n)
{
    $LMin = array_fill(0, $n, NULL);
    $RMax = array_fill(0, $n, NULL);

    // LMin[i] = minimum of arr[0..i]
    $LMin[0] = $arr[0];
    for ($i = 1; $i < $n; $i++) {
        $LMin[$i] = min($arr[$i], $LMin[$i - 1]);
    }

    // RMax[j] = maximum of arr[j..n-1]
    $RMax[$n - 1] = $arr[$n - 1];
    for ($j = $n - 2; $j >= 0; $j--) {
        $RMax[$j] = max($arr[$j], $RMax[$j + 1]);
    }

    // Two-pointer sweep, similar to merge() of MergeSort.
    $i = 0;
    $j = 0;
    $maxDiff = -1;
    while ($j < $n && $i < $n) {
        if ($LMin[$i] <= $RMax[$j]) {
            $maxDiff = max($maxDiff, $j - $i);
            $j++;
        } else {
            $i++;
        }
    }
    return $maxDiff;
}

// Driver Code
$arr = array(9, 2, 3, 4, 5, 6, 7, 8, 18, 0);
$n = sizeof($arr);
echo maxIndexDiff($arr, $n);
?>
// Javascript program to find the maximum
// j - i such that arr[j] > arr[i]

// Utility: larger of two ints
function max(x, y)
{
    return x > y ? x : y;
}

// Utility: smaller of two ints
function min(x, y)
{
    return x < y ? x : y;
}

/**
 * For a given array arr[], returns the maximum j - i
 * such that arr[j] >= arr[i].
 * @param {number[]} arr
 * @param {number} n
 * @returns {number}
 */
function maxIndexDiff(arr, n)
{
    const LMin = new Array(n);
    const RMax = new Array(n);

    // LMin[k] = minimum of arr[0..k]
    LMin[0] = arr[0];
    for (let k = 1; k < n; ++k)
        LMin[k] = min(arr[k], LMin[k - 1]);

    // RMax[k] = maximum of arr[k..n-1]
    RMax[n - 1] = arr[n - 1];
    for (let k = n - 2; k >= 0; --k)
        RMax[k] = max(arr[k], RMax[k + 1]);

    // Two-pointer sweep, similar to merge() of MergeSort.
    let i = 0;
    let j = 0;
    let maxDiff = -1;
    while (j < n && i < n) {
        if (LMin[i] <= RMax[j]) {
            maxDiff = max(maxDiff, j - i);
            j = j + 1;
        } else {
            i = i + 1;
        }
    }
    return maxDiff;
}

// Driver Code
const nums = [ 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 ];
const diff = maxIndexDiff(nums, nums.length);

// Fixed: guarded so the snippet also runs outside a browser (e.g.
// Node), where `document` is undefined and the original crashed.
if (typeof document !== 'undefined') {
    document.write(diff);
} else {
    console.log(diff);
}
8
Time Complexity: O(n) Auxiliary Space: O(n)
Please write comments if you find the above codes/algorithms incorrect, or find other ways to solve the same problem.
We consider an auxiliary array : rightMax[] , such that, rightMax[i] = max element of the subarray arr[i...(n-1)], the largest or equal element after arr[i] element
Suppose (arr[i], arr[jLast] ) is a pair, such that arr[jLast] is the last greater or equal element than arr[i]. For the pairs ending with arr[jLast] : ( arr[k], arr[jLast] ) for all k = (i+1) to jLast
we don't need to consider (jLast − k) because (jLast − i) > (jLast − k) for all such k's.
So we can skip those pairs.
Traversing from left to right of both arrays : arr[] and rightMax[] , when we first encounter rightMax[j] < arr[i[ , we know that jLast = j-1, and we can skip the pairs (arr[k], arr[jLast]) for all k = (i+1) to jLast.
And also rightMax[] is non increasing sequence , so all elements at right side of rightMax[j] is smaller than or equal to rightMax[j]. But there may be arr[x] after arr[i] (x > i) such that arr[x] < rightMax[j] for x > i, so increment i when rightMax[j] < arr[i] is encountered.
C++
Java
Python3
C#
Javascript
#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int rightMax[n]; rightMax[n-1]= arr[n-1]; for(int i = n-2; i>=0; i--) rightMax[i] = max(rightMax[i+1] , arr[i]); //rightMax[i] = max{ arr[i...(n-1] } int maxDist = INT_MIN; int i = 0, j = 0; while(i<n && j<n) { if(rightMax[j] >= arr[i]) { maxDist = max( maxDist, j-i ); j++; } else // if(rightMax[j] < leftMin[i]) i++; } return maxDist; } // Driver Codeint main(){ int arr[] = { 34,8,10,3,2,80,30,33,1}; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << maxDiff; return 0;} // This code is contributed by Sourashis Mondal
import java.util.*;

class GFG {

    /* For a given array arr[], returns the maximum j - i
       such that arr[j] >= arr[i]. */
    static int maxIndexDiff(int arr[], int n)
    {
        // rightMax[k] = max of arr[k..n-1]
        int[] rightMax = new int[n];
        rightMax[n - 1] = arr[n - 1];
        for (int k = n - 2; k >= 0; k--)
            rightMax[k] = Math.max(rightMax[k + 1], arr[k]);

        int maxDist = Integer.MIN_VALUE;
        int i = 0;
        int j = 0;
        // Extend j while some element at or after j is >= arr[i];
        // otherwise advance i.
        while (i < n && j < n) {
            if (rightMax[j] >= arr[i]) {
                maxDist = Math.max(maxDist, j - i);
                j++;
            } else { // rightMax[j] < arr[i]
                i++;
            }
        }
        return maxDist;
    }

    // Driver Code
    public static void main(String[] args)
    {
        int arr[] = { 34, 8, 10, 3, 2, 80, 30, 33, 1 };
        System.out.print(maxIndexDiff(arr, arr.length));
    }
}
# For a given array arr[], returns the maximum j - i
# such that arr[j] >= arr[i].
def maxIndexDiff(arr, n):
    # rightMax[k] = max of arr[k..n-1]
    rightMax = [0] * n
    rightMax[n - 1] = arr[n - 1]
    for k in range(n - 2, -1, -1):
        rightMax[k] = max(rightMax[k + 1], arr[k])

    maxDist = -2**31
    i = j = 0
    # Extend j while some element at or after j is >= arr[i];
    # otherwise advance i.
    while i < n and j < n:
        if rightMax[j] >= arr[i]:
            maxDist = max(maxDist, j - i)
            j += 1
        else:  # rightMax[j] < arr[i]
            i += 1
    return maxDist


# Driver Code
arr = [34, 8, 10, 3, 2, 80, 30, 33, 1]
n = len(arr)
maxDiff = maxIndexDiff(arr, n)

print(maxDiff)
/* For a given array arr[], returns the maximum j - i such that
arr[j] >= arr[i] */
using System;

public class GFG {

    static int maxIndexDiff(int[] arr, int n)
    {
        // rightMax[k] = max of arr[k..n-1]
        int[] rightMax = new int[n];
        rightMax[n - 1] = arr[n - 1];
        for (int k = n - 2; k >= 0; k--)
            rightMax[k] = Math.Max(rightMax[k + 1], arr[k]);

        int maxDist = Int32.MinValue;
        int i = 0;
        int j = 0;
        // Extend j while some element at or after j is >= arr[i];
        // otherwise advance i.
        while (i < n && j < n) {
            if (rightMax[j] >= arr[i]) {
                maxDist = Math.Max(maxDist, j - i);
                j++;
            } else { // rightMax[j] < arr[i]
                i++;
            }
        }
        return maxDist;
    }

    // Driver Code
    public static void Main()
    {
        int[] arr = { 34, 8, 10, 3, 2, 80, 30, 33, 1 };
        Console.Write(maxIndexDiff(arr, arr.Length));
    }
}
/**
 * For a given array arr[], returns the maximum j - i
 * such that arr[j] >= arr[i].
 * @param {number[]} arr
 * @param {number} n
 * @returns {number}
 */
function maxIndexDiff(arr, n)
{
    // rightMax[i] = max of arr[i..n-1]
    const rightMax = new Array(n).fill(0);
    rightMax[n - 1] = arr[n - 1];
    for (let i = n - 2; i >= 0; i--) {
        rightMax[i] = Math.max(rightMax[i + 1], arr[i]);
    }

    // Fixed: the original seeded maxDist with Number.MIN_VALUE, which
    // is the smallest POSITIVE double (~5e-324), not the most negative
    // number. Use -Infinity as the identity for Math.max.
    let maxDist = Number.NEGATIVE_INFINITY;
    let i = 0;
    let j = 0;
    // Extend j while some element at or after j is >= arr[i];
    // otherwise advance i.
    while (i < n && j < n) {
        if (rightMax[j] >= arr[i]) {
            maxDist = Math.max(maxDist, j - i);
            j++;
        } else { // rightMax[j] < arr[i]
            i++;
        }
    }
    return maxDist;
}

// Driver Code
const arrIn = [ 34, 8, 10, 3, 2, 80, 30, 33, 1 ];
const res = maxIndexDiff(arrIn, arrIn.length);

// Guarded so the snippet also runs outside a browser (e.g. Node).
if (typeof document !== 'undefined') {
    document.write(res);
} else {
    console.log(res);
}
6
We can also do this using leftMin[] array only , where leftMin[i] = min element of the subarray arr[0...i]
C++
Java
Python3
C#
Javascript
#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int leftMin[n] ; leftMin[0] = arr[0]; for(int i = 1 ; i<n; i++) leftMin[i] = min(leftMin[i-1], arr[i]); //leftMin[i] = min{ arr[0...i] } int maxDist = INT_MIN; int i = n-1, j = n-1; while(i>=0 && j>=0) { if(arr[j] >= leftMin[i]) { maxDist = max(maxDist, j-i); i--; } else j--; } return maxDist; } // Driver Codeint main(){ int arr[] = { 34,8,10,3,2,80,30,33,1}; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << maxDiff; return 0;} // This code is contributed by Sourashis Mondal
import java.util.*;

class GFG {

    /* For a given array arr[], returns the maximum j - i
       such that arr[j] >= arr[i]. */
    static int maxIndexDiff(int arr[], int n)
    {
        // leftMin[k] = min of arr[0..k]
        int[] leftMin = new int[n];
        leftMin[0] = arr[0];
        for (int k = 1; k < n; k++)
            leftMin[k] = Math.min(leftMin[k - 1], arr[k]);

        int maxDist = Integer.MIN_VALUE;
        int i = n - 1;
        int j = n - 1;
        // Sweep right-to-left: while arr[j] can pair with leftMin[i],
        // record the distance and move i left; otherwise move j left.
        while (i >= 0 && j >= 0) {
            if (arr[j] >= leftMin[i]) {
                maxDist = Math.max(maxDist, j - i);
                i--;
            } else {
                j--;
            }
        }
        return maxDist;
    }

    // Driver Code
    public static void main(String[] args)
    {
        int arr[] = { 34, 8, 10, 3, 2, 80, 30, 33, 1 };
        System.out.print(maxIndexDiff(arr, arr.length));
    }
}
# For a given array arr[], returns the maximum j - i
# such that arr[j] >= arr[i].
def maxIndexDiff(arr, n):
    # leftMin[k] = min of arr[0..k]
    leftMin = [0] * n
    leftMin[0] = arr[0]
    for k in range(1, n):
        leftMin[k] = min(leftMin[k - 1], arr[k])

    maxDist = -2**32
    i = j = n - 1
    # Sweep right-to-left: while arr[j] can pair with leftMin[i],
    # record the distance and move i left; otherwise move j left.
    while i >= 0 and j >= 0:
        if arr[j] >= leftMin[i]:
            maxDist = max(maxDist, j - i)
            i -= 1
        else:
            j -= 1
    return maxDist


# Driver Code
arr = [34, 8, 10, 3, 2, 80, 30, 33, 1]
n = len(arr)
maxDiff = maxIndexDiff(arr, n)
print(maxDiff)
using System;

public class GFG {

    /* For a given array arr[], returns the maximum j - i
       such that arr[j] >= arr[i]. */
    static int maxIndexDiff(int[] arr, int n)
    {
        // leftMin[k] = min of arr[0..k]
        int[] leftMin = new int[n];
        leftMin[0] = arr[0];
        for (int k = 1; k < n; k++)
            leftMin[k] = Math.Min(leftMin[k - 1], arr[k]);

        int maxDist = Int32.MinValue;
        int i = n - 1;
        int j = n - 1;
        // Sweep right-to-left: while arr[j] can pair with leftMin[i],
        // record the distance and move i left; otherwise move j left.
        while (i >= 0 && j >= 0) {
            if (arr[j] >= leftMin[i]) {
                maxDist = Math.Max(maxDist, j - i);
                i--;
            } else {
                j--;
            }
        }
        return maxDist;
    }

    // Driver Code
    static public void Main()
    {
        int[] arr = { 34, 8, 10, 3, 2, 80, 30, 33, 1 };
        Console.Write(maxIndexDiff(arr, arr.Length));
    }
}
/**
 * For a given array arr[], returns the maximum j - i
 * such that arr[j] >= arr[i].
 * @param {number[]} arr
 * @param {number} n
 * @returns {number}
 */
function maxIndexDiff(arr, n)
{
    // leftMin[k] = min of arr[0..k]
    const leftMin = new Array(n).fill(0);
    leftMin[0] = arr[0];
    for (let k = 1; k < n; k++) {
        leftMin[k] = Math.min(leftMin[k - 1], arr[k]);
    }

    // Fixed: Number.MIN_VALUE is the smallest POSITIVE double, not the
    // most negative number; use -Infinity as the identity for Math.max.
    let maxDist = Number.NEGATIVE_INFINITY;
    let i = n - 1;
    let j = n - 1;
    // Sweep right-to-left: while arr[j] can pair with leftMin[i],
    // record the distance and move i left; otherwise move j left.
    while (i >= 0 && j >= 0) {
        if (arr[j] >= leftMin[i]) {
            maxDist = Math.max(maxDist, j - i);
            i--;
        } else {
            j--;
        }
    }
    return maxDist;
}

// Driver Code
const data = [ 34, 8, 10, 3, 2, 80, 30, 33, 1 ];
const out = maxIndexDiff(data, data.length);

// Guarded so the snippet also runs outside a browser (e.g. Node).
if (typeof document !== 'undefined') {
    document.write(out);
} else {
    console.log(out);
}
6
Sam007
Gautam Karakoti
ukasp
Akanksha_Rai
rathbhupendra
msg
executable
shikhasingrajput
Rajput-Ji
winter_soldier
avanitrachhadiya2155
rag2127
mank1083
decode2207
suresh07
abhijit gupta 1
2011harshgupta
dheerwani2498
sourashis69
SHUBHAMSINGH10
simranarora5sos
shinjanpatra
pushpeshrajdx01
Amazon
Snapdeal
Arrays
Amazon
Snapdeal
Arrays
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
Introduction to Arrays
Multidimensional Arrays in Java
Python | Using 2D arrays/lists the right way
Linked List vs Array
Given an array of size n and a number k, find all elements that appear more than n/k times
Queue | Set 1 (Introduction and Array Implementation)
Subset Sum Problem | DP-25
K'th Smallest/Largest Element in Unsorted Array | Set 1
Find the Missing Number
Array of Strings in C++ (5 Different Ways to Create)
|
[
{
"code": null,
"e": 24739,
"s": 24711,
"text": "\n16 Mar, 2022"
},
{
"code": null,
"e": 24811,
"s": 24739,
"text": "Given an array arr[], find the maximum j β i such that arr[j] > arr[i]."
},
{
"code": null,
"e": 24823,
"s": 24811,
"text": "Examples : "
},
{
"code": null,
"e": 25065,
"s": 24823,
"text": " Input: {34, 8, 10, 3, 2, 80, 30, 33, 1}\n Output: 6 (j = 7, i = 1)\n\n Input: {9, 2, 3, 4, 5, 6, 7, 8, 18, 0}\n Output: 8 ( j = 8, i = 0)\n\n Input: {1, 2, 3, 4, 5, 6}\n Output: 5 (j = 5, i = 0)\n\n Input: {6, 5, 4, 3, 2, 1}\n Output: -1 "
},
{
"code": null,
"e": 25386,
"s": 25065,
"text": "Method 1 (Simple but Inefficient) Run two loops. In the outer loop, pick elements one by one from the left. In the inner loop, compare the picked element with the elements starting from the right side. Stop the inner loop when you see an element greater than the picked element and keep updating the maximum j-i so far. "
},
{
"code": null,
"e": 25390,
"s": 25386,
"text": "C++"
},
{
"code": null,
"e": 25392,
"s": 25390,
"text": "C"
},
{
"code": null,
"e": 25397,
"s": 25392,
"text": "Java"
},
{
"code": null,
"e": 25405,
"s": 25397,
"text": "Python3"
},
{
"code": null,
"e": 25408,
"s": 25405,
"text": "C#"
},
{
"code": null,
"e": 25412,
"s": 25408,
"text": "PHP"
},
{
"code": null,
"e": 25423,
"s": 25412,
"text": "Javascript"
},
{
"code": "// CPP program for the above approach#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff;} int main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << \"\\n\" << maxDiff; return 0;} // This code is contributed// by Akanksha Rai(Abby_akku)",
"e": 26089,
"s": 25423,
"text": null
},
{
"code": "// C program for the above approach#include <stdio.h>/* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff;} int main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); printf(\"\\n %d\", maxDiff); getchar(); return 0;}",
"e": 26684,
"s": 26089,
"text": null
},
{
"code": "// Java program for the above approachclass FindMaximum { /* For a given array arr[], returns the maximum j-i such that arr[j] > arr[i] */ int maxIndexDiff(int arr[], int n) { int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff; } /* Driver program to test above functions */ public static void main(String[] args) { FindMaximum max = new FindMaximum(); int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = arr.length; int maxDiff = max.maxIndexDiff(arr, n); System.out.println(maxDiff); }}",
"e": 27453,
"s": 26684,
"text": null
},
{
"code": "# Python3 program to find the maximum# j β i such that arr[j] > arr[i] # For a given array arr[], returns# the maximum j β i such that# arr[j] > arr[i] def maxIndexDiff(arr, n): maxDiff = -1 for i in range(0, n): j = n - 1 while(j > i): if arr[j] > arr[i] and maxDiff < (j - i): maxDiff = j - i j -= 1 return maxDiff # driver codearr = [9, 2, 3, 4, 5, 6, 7, 8, 18, 0]n = len(arr)maxDiff = maxIndexDiff(arr, n)print(maxDiff) # This article is contributed by Smitha Dinesh Semwal",
"e": 27994,
"s": 27453,
"text": null
},
{
"code": "// C# program to find the maximum// j β i such that arr[j] > arr[i]using System;class GFG { // For a given array arr[], returns // the maximum j-i such that arr[j] > arr[i] static int maxIndexDiff(int[] arr, int n) { int maxDiff = -1; int i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff; } // Driver program public static void Main() { int[] arr = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = arr.Length; int maxDiff = maxIndexDiff(arr, n); Console.Write(maxDiff); }}// This Code is Contributed by Sam007",
"e": 28739,
"s": 27994,
"text": null
},
{
"code": "<?php// PHP program to find the maximum// j β i such that arr[j] > arr[i] // For a given array arr[], returns// the maximum j β i such that// arr[j] > arr[i]function maxIndexDiff($arr, $n){ $maxDiff = -1; for ($i = 0; $i < $n; ++$i) { for ($j = $n - 1; $j > $i; --$j) { if($arr[$j] > $arr[$i] && $maxDiff < ($j - $i)) $maxDiff = $j - $i; } } return $maxDiff;} // Driver Code$arr = array(9, 2, 3, 4, 5, 6, 7, 8, 18, 0);$n = count($arr);$maxDiff = maxIndexDiff($arr, $n);echo $maxDiff ; // This code is contributed by Sam007?>",
"e": 29358,
"s": 28739,
"text": null
},
{
"code": "<script>// JavaScript program for the above approach /* For a given array arr[],returns the maximum j β i suchthat arr[j] > arr[i] */function maxIndexDiff(arr, n){ let maxDiff = -1; let i, j; for (i = 0; i < n; ++i) { for (j = n - 1; j > i; --j) { if (arr[j] > arr[i] && maxDiff < (j - i)) maxDiff = j - i; } } return maxDiff;} // Driver code let arr = [ 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 ]; let n = arr.length; let maxDiff = maxIndexDiff(arr, n); document.write(maxDiff); // This code is contributed by Manoj.</script>",
"e": 29957,
"s": 29358,
"text": null
},
{
"code": null,
"e": 29959,
"s": 29957,
"text": "8"
},
{
"code": null,
"e": 29983,
"s": 29959,
"text": "Time Complexity: O(n^2)"
},
{
"code": null,
"e": 30764,
"s": 29983,
"text": "Method 2 β Improvising the Brute Force Algorithm and looking for BUD, i.e Bottlenecks, unnecessary and duplicated works. A quick observation actually shows that we have been looking to find the first greatest element traversing from the end of the array to the current index. We can see that we are trying to find the first greatest element again and again for each element in the array. Letβs say we have an array with us for example [1, 5, 12, 4, 9] now we know that 9 is the element that is greater than 1, 5, and 4 but why do we need to find that again and again. We can actually keep a track of the maximum number moving from the end to the start of the array. The approach will help us understand better and also this improvisation is great to come up with in an interview. "
},
{
"code": null,
"e": 30777,
"s": 30764,
"text": "Approach : "
},
{
"code": null,
"e": 31174,
"s": 30777,
"text": "Traverse the array from the end and keep a track of the maximum number to the right of the current index including selfNow we have a monotonous decreasing array, and we know we can use binary search to find the index of the rightmost greater elementNow we will just use binary search for each of the elements in the array and store the maximum difference of the indices and thatβs it we are done."
},
{
"code": null,
"e": 31294,
"s": 31174,
"text": "Traverse the array from the end and keep a track of the maximum number to the right of the current index including self"
},
{
"code": null,
"e": 31425,
"s": 31294,
"text": "Now we have a monotonous decreasing array, and we know we can use binary search to find the index of the rightmost greater element"
},
{
"code": null,
"e": 31573,
"s": 31425,
"text": "Now we will just use binary search for each of the elements in the array and store the maximum difference of the indices and thatβs it we are done."
},
{
"code": null,
"e": 31577,
"s": 31573,
"text": "C++"
},
{
"code": null,
"e": 31579,
"s": 31577,
"text": "C"
},
{
"code": null,
"e": 31584,
"s": 31579,
"text": "Java"
},
{
"code": null,
"e": 31592,
"s": 31584,
"text": "Python3"
},
{
"code": null,
"e": 31595,
"s": 31592,
"text": "C#"
},
{
"code": null,
"e": 31606,
"s": 31595,
"text": "Javascript"
},
{
"code": "/* For a given array arr[], calculates the maximum j β i such that arr[j] > arr[i] */#include <bits/stdc++.h>using namespace std; int main(){ vector<long long int> v{ 34, 8, 10, 3, 2, 80, 30, 33, 1 }; int n = v.size(); vector<long long int> maxFromEnd(n + 1, INT_MIN); // create an array maxfromEnd for (int i = v.size() - 1; i >= 0; i--) { maxFromEnd[i] = max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < v.size(); i++) { int low = i + 1, high = v.size() - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current answer and look // for further larger number to the right // side ans = max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // keeping a track of the // maximum difference in indices result = max(result, ans - i); } cout << result << endl;}",
"e": 32710,
"s": 31606,
"text": null
},
{
"code": "/* C program to implementthe above approach */ /* For a given array arr[],calculates the maximum j β isuch that arr[j] > arr[i] */#include <limits.h>#include <stdio.h> /* Function for maximum oftwo numbers in C */int max(int num1, int num2){ return (num1 > num2 ) ? num1 : num2;} int main(){ int v[] = { 34, 8, 10, 3, 2, 80, 30, 33, 1 }; int n = sizeof(v) / sizeof(v[0]); int maxFromEnd[n+1]; for (int i = 0; i < n+1; i++) { maxFromEnd[i] = INT_MIN; } // create an array maxfromEnd for (int i = n - 1; i >= 0; i--) { maxFromEnd[i] = max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < n; i++) { int low = i + 1, high = n - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current answer and look // for further larger number to the right // side ans = max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // keeping a track of the // maximum difference in indices result = max(result, ans - i); } printf(\"\\n %d\", result);} /* This code is contributed by Pushpesh Raj */",
"e": 34023,
"s": 32710,
"text": null
},
{
"code": "// Java program to implement// the above approach // For a given array arr[],// calculates the maximum j β i// such that arr[j] > arr[i]import java.util.*;class GFG{ public static void main(String[] args){ int []v = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = v.length; int []maxFromEnd = new int[n + 1]; Arrays.fill(maxFromEnd, Integer.MIN_VALUE); // Create an array maxfromEnd for (int i = v.length - 1; i >= 0; i--) { maxFromEnd[i] = Math.max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < v.length; i++) { int low = i + 1, high = v.length - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current // answer and look for further // larger number to the right side ans = Math.max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // Keeping a track of the // maximum difference in indices result = Math.max(result, ans - i); } System.out.print(result + \"\\n\");}} // This code is contributed by shikhasingrajput",
"e": 35191,
"s": 34023,
"text": null
},
{
"code": "# Python3 program to implement# the above approach # For a given array arr,# calculates the maximum j β i# such that arr[j] > arr[i] # Driver codeif __name__ == '__main__': v = [34, 8, 10, 3, 2, 80, 30, 33, 1]; n = len(v); maxFromEnd = [-38749432] * (n + 1); # Create an array maxfromEnd for i in range(n - 1, 0, -1): maxFromEnd[i] = max(maxFromEnd[i + 1], v[i]); result = 0; for i in range(0, n): low = i + 1; high = n - 1; ans = i; while (low <= high): mid = int((low + high) / 2); if (v[i] <= maxFromEnd[mid]): # We store this as current # answer and look for further # larger number to the right side ans = max(ans, mid); low = mid + 1; else: high = mid - 1; # Keeping a track of the # maximum difference in indices result = max(result, ans - i); print(result, end = \"\"); # This code is contributed by Rajput-Ji",
"e": 36271,
"s": 35191,
"text": null
},
{
"code": "// C# program to implement// the above approach // For a given array []arr,// calculates the maximum j β i// such that arr[j] > arr[i]using System;class GFG{ public static void Main(String[] args){ int []v = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = v.Length; int []maxFromEnd = new int[n + 1]; for (int i = 0; i < maxFromEnd.Length; i++) maxFromEnd[i] = int.MinValue; // Create an array maxfromEnd for (int i = v.Length - 1; i >= 0; i--) { maxFromEnd[i] = Math.Max(maxFromEnd[i + 1], v[i]); } int result = 0; for (int i = 0; i < v.Length; i++) { int low = i + 1, high = v.Length - 1, ans = i; while (low <= high) { int mid = (low + high) / 2; if (v[i] <= maxFromEnd[mid]) { // We store this as current // answer and look for further // larger number to the right side ans = Math.Max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // Keeping a track of the // maximum difference in indices result = Math.Max(result, ans - i); } Console.Write(result + \"\\n\");}} // This code is contributed by shikhasingrajput",
"e": 37483,
"s": 36271,
"text": null
},
{
"code": "<script> // Javascript program to implement // the above approach // For a given array []arr, // calculates the maximum j β i // such that arr[j] > arr[i] let v = [34, 8, 10, 3, 2, 80, 30, 33, 1]; let n = v.length; let maxFromEnd = new Array(n + 1); for (let i = 0; i < maxFromEnd.length; i++) maxFromEnd[i] = Number.MIN_VALUE; // Create an array maxfromEnd for (let i = v.length - 1; i >= 0; i--) { maxFromEnd[i] = Math.max(maxFromEnd[i + 1], v[i]); } let result = 0; for (let i = 0; i < v.length; i++) { let low = i + 1, high = v.length - 1, ans = i; while (low <= high) { let mid = parseInt((low + high) / 2, 10); if (v[i] <= maxFromEnd[mid]) { // We store this as current // answer and look for further // larger number to the right side ans = Math.max(ans, mid); low = mid + 1; } else { high = mid - 1; } } // Keeping a track of the // maximum difference in indices result = Math.max(result, ans - i); } document.write(result); </script>",
"e": 38650,
"s": 37483,
"text": null
},
{
"code": null,
"e": 38652,
"s": 38650,
"text": "6"
},
{
"code": null,
"e": 38705,
"s": 38652,
"text": "Time complexity : O(N*log(N)) Space complexity: O(N)"
},
{
"code": null,
"e": 38860,
"s": 38705,
"text": "Method 3 O(nLgn):Use hashing and sorting to solve this problem in less than quadratic complexity after taking special care of the duplicates. Approach : "
},
{
"code": null,
"e": 39273,
"s": 38860,
"text": "Traverse the array and store the index of each element in a list (to handle duplicates).Sort the array.Now traverse the array and keep track of the maximum difference of i and j.For j consider the last index from the list of possible index of the element and for i consider the first index from the list. (As the index were appended in ascending order).Keep updating the max difference till the end of the array."
},
{
"code": null,
"e": 39362,
"s": 39273,
"text": "Traverse the array and store the index of each element in a list (to handle duplicates)."
},
{
"code": null,
"e": 39378,
"s": 39362,
"text": "Sort the array."
},
{
"code": null,
"e": 39454,
"s": 39378,
"text": "Now traverse the array and keep track of the maximum difference of i and j."
},
{
"code": null,
"e": 39630,
"s": 39454,
"text": "For j consider the last index from the list of possible index of the element and for i consider the first index from the list. (As the index were appended in ascending order)."
},
{
"code": null,
"e": 39690,
"s": 39630,
"text": "Keep updating the max difference till the end of the array."
},
{
"code": null,
"e": 39694,
"s": 39690,
"text": "C++"
},
{
"code": null,
"e": 39699,
"s": 39694,
"text": "Java"
},
{
"code": null,
"e": 39707,
"s": 39699,
"text": "Python3"
},
{
"code": null,
"e": 39710,
"s": 39707,
"text": "C#"
},
{
"code": null,
"e": 39721,
"s": 39710,
"text": "Javascript"
},
{
"code": "// C++ implementation of// the hashmap approach#include <bits/stdc++.h>using namespace std; // Function to find maximum// index differenceint maxIndexDiff(vector<int>& arr, int n){ // Initialise unordered_map unordered_map<int, vector<int> > hashmap; // Iterate from 0 to n - 1 for (int i = 0; i < n; i++) { hashmap[arr[i]].push_back(i); } // Sort arr sort(arr.begin(), arr.end()); int maxDiff = INT_MIN; int temp = n; // Iterate from 0 to n - 1 for (int i = 0; i < n; i++) { if (temp > hashmap[arr[i]][0]) { temp = hashmap[arr[i]][0]; } maxDiff = max( maxDiff, hashmap[arr[i]][hashmap[arr[i]].size() - 1] - temp); } return maxDiff;} // Driver Codeint main(){ int n = 9; vector<int> arr{ 34, 8, 10, 3, 2, 80, 30, 33, 1 }; // Function Call int ans = maxIndexDiff(arr, n); cout << \"The maxIndexDiff is : \" << ans << endl; return 1;}",
"e": 40696,
"s": 39721,
"text": null
},
{
"code": "// Java implementation of// the hashmap approachimport java.io.*;import java.util.*; class GFG{ // Function to find maximum// index differencestatic int maxIndexDiff(ArrayList<Integer> arr, int n){ // Initialise unordered_map Map<Integer, ArrayList<Integer>> hashmap = new HashMap<Integer, ArrayList<Integer>>(); // Iterate from 0 to n - 1 for(int i = 0; i < n; i++) { if(hashmap.containsKey(arr.get(i))) { hashmap.get(arr.get(i)).add(i); } else { hashmap.put(arr.get(i), new ArrayList<Integer>()); hashmap.get(arr.get(i)).add(i); } } // Sort arr Collections.sort(arr); int maxDiff = Integer.MIN_VALUE; int temp = n; // Iterate from 0 to n - 1 for(int i = 0; i < n; i++) { if (temp > hashmap.get(arr.get(i)).get(0)) { temp = hashmap.get(arr.get(i)).get(0); } maxDiff = Math.max(maxDiff, hashmap.get(arr.get(i)).get( hashmap.get(arr.get(i)).size() - 1) - temp); } return maxDiff;} // Driver Codepublic static void main(String[] args){ int n = 9; ArrayList<Integer> arr = new ArrayList<Integer>( Arrays.asList(34, 8, 10, 3, 2, 80, 30, 33, 1)); // Function Call int ans = maxIndexDiff(arr, n); System.out.println(\"The maxIndexDiff is : \" + ans);}} // This code is contributed by avanitrachhadiya2155",
"e": 42172,
"s": 40696,
"text": null
},
{
"code": "# Python3 implementation of the above approachn = 9a = [34, 8, 10, 3, 2, 80, 30, 33, 1] # To store the index of an element.index = dict()for i in range(n): if a[i] in index: # append to list (for duplicates) index[a[i]].append(i) else: # if first occurrence index[a[i]] = [i] # sort the input arraya.sort() maxDiff = 0 # Temporary variable to keep track of minimum itemp = n for i in range(n): if temp > index[a[i]][0]: temp = index[a[i]][0] maxDiff = max(maxDiff, index[a[i]][-1]-temp) print(maxDiff)",
"e": 42732,
"s": 42172,
"text": null
},
{
"code": "// C# implementation of// the hashmap approach using System;using System.Collections.Generic; public class GFG{ // Function to find maximum // index difference static int maxIndexDiff(List<int> arr, int n) { Dictionary<int,List<int>> hashmap = new Dictionary<int,List<int>>(); // Iterate from 0 to n - 1 for(int i = 0; i < n; i++) { if(hashmap.ContainsKey(arr[i])) { hashmap[arr[i]].Add(i); } else { hashmap.Add(arr[i], new List<int>()); hashmap[arr[i]].Add(i); } } // Sort arr arr.Sort(); int maxDiff = -1; int temp = n; // Iterate from 0 to n - 1 for(int i = 0; i < n; i++) { if(temp > hashmap[arr[i]][0] ) { temp = hashmap[arr[i]][0]; } maxDiff = Math.Max(maxDiff,hashmap[arr[i]][hashmap[arr[i]].Count - 1]- temp); } return maxDiff; } // Driver Code static public void Main (){ int n = 9; List<int> arr = new List<int>(); arr.Add(34); arr.Add(8); arr.Add(10); arr.Add(3); arr.Add(2); arr.Add(80); arr.Add(30); arr.Add(33); arr.Add(1); // Function Call int ans = maxIndexDiff(arr, n); Console.WriteLine(\"The maxIndexDiff is : \" + ans ); }} // This code is contributed by rag2127.",
"e": 43998,
"s": 42732,
"text": null
},
{
"code": "<script>// JavaScript implementation of// the hashmap approach // Function to find maximum// index differencefunction maxIndexDiff(arr,n){ // Initialise map in JavaScript let hashmap = new Map() // Iterate from 0 to n - 1 for (let i = 0; i < n; i++) { hashmap[arr[i]] = hashmap[arr[i]] || [] hashmap[arr[i]].push(i) } // Sort arr arr.sort((a,b)=> (a - b)) let maxDiff = 0 let temp = n // Iterate from 0 to n - 1 for (let i = 0; i < n; i++) { if (temp > hashmap[arr[i]][0]) { temp = hashmap[arr[i]][0] } maxDiff = Math.max( maxDiff,hashmap[arr[i]][hashmap[arr[i]].length - 1]- temp ) } return maxDiff} // Driver Code let n = 9const arr = [ 34, 8, 10, 3, 2, 80, 30, 33, 1 ] // Function Calllet ans = maxIndexDiff(arr, n)document.write(`The maxIndexDiff is : ${ans}`) // This code is contributed by shinjanpatra </script>",
"e": 44932,
"s": 43998,
"text": null
},
{
"code": null,
"e": 44956,
"s": 44932,
"text": "The maxIndexDiff is : 6"
},
{
"code": null,
"e": 44987,
"s": 44956,
"text": "Time complexity : O(N*log(N)) "
},
{
"code": null,
"e": 45972,
"s": 44987,
"text": "Method 4 (Efficient) To solve this problem, we need to get two optimum indexes of arr[]: left index i and right index j. For an element arr[i], we do not need to consider arr[i] for left index if there is an element smaller than arr[i] on left side of arr[i]. Similarly, if there is a greater element on right side of arr[j] then we do not need to consider this j for right index. So we construct two auxiliary arrays LMin[] and RMax[] such that LMin[i] holds the smallest element on left side of arr[i] including arr[i], and RMax[j] holds the greatest element on right side of arr[j] including arr[j]. After constructing these two auxiliary arrays, we traverse both of these arrays from left to right. While traversing LMin[] and RMax[] if we see that LMin[i] is greater than RMax[j], then we must move ahead in LMin[] (or do i++) because all elements on left of LMin[i] are greater than or equal to LMin[i]. Otherwise we must move ahead in RMax[j] to look for a greater j β i value."
},
{
"code": null,
"e": 46037,
"s": 45972,
"text": "Thanks to celicom for suggesting the algorithm for this method. "
},
{
"code": null,
"e": 46054,
"s": 46037,
"text": "Working Example:"
},
{
"code": null,
"e": 46101,
"s": 46054,
"text": "Lets consider any example [7 3 1 8 9 10 4 5 6]"
},
{
"code": null,
"e": 46120,
"s": 46101,
"text": "what is maxRight ?"
},
{
"code": null,
"e": 46215,
"s": 46120,
"text": "Filling from right side 6 is first element now 6 > 5 so again we fill 6 till we reach 10 > 6 :"
},
{
"code": null,
"e": 46254,
"s": 46215,
"text": "[10 10 10 10 10 10 6 6 6] this is maxR"
},
{
"code": null,
"e": 46288,
"s": 46254,
"text": "[7 3 1 1 1 1 1 1 1 ] this is minL"
},
{
"code": null,
"e": 46356,
"s": 46288,
"text": "now we see that how to reach answer from these to and its proof !!!"
},
{
"code": null,
"e": 46417,
"s": 46356,
"text": "lets compare first elements of the arrays now we see 10 > 7,"
},
{
"code": null,
"e": 46488,
"s": 46417,
"text": "now we increase maxR by 1 till it becomes lesser than 7 i.e at index 5"
},
{
"code": null,
"e": 46522,
"s": 46488,
"text": "hence answer till now is. 5-0 = 5"
},
{
"code": null,
"e": 46657,
"s": 46522,
"text": "now we will increase minL we get 3 which is lesser than 6 so we increase maxR till it reaches last index and the answer becomes 8-1= 7"
},
{
"code": null,
"e": 46702,
"s": 46657,
"text": "so we see how we are getting correct answer."
},
{
"code": null,
"e": 46848,
"s": 46702,
"text": "As we need the max difference j β i such that A[i]<= A[j], hence we do not need to consider element after the index j and element before index i."
},
{
"code": null,
"e": 46881,
"s": 46848,
"text": "in previous hint, make 2 arrays,"
},
{
"code": null,
"e": 46945,
"s": 46881,
"text": "First, will store smallest occurring element before the element"
},
{
"code": null,
"e": 47008,
"s": 46945,
"text": "Second, will store largest occurring element after the element"
},
{
"code": null,
"e": 47222,
"s": 47008,
"text": "Traverse the Second array, till the element in second array is larger than or equal to First array, and store the index difference. And if it becomes smaller, traverse the first array till it again becomes larger."
},
{
"code": null,
"e": 47277,
"s": 47222,
"text": "And store the max difference of this index difference."
},
{
"code": null,
"e": 47281,
"s": 47277,
"text": "C++"
},
{
"code": null,
"e": 47283,
"s": 47281,
"text": "C"
},
{
"code": null,
"e": 47288,
"s": 47283,
"text": "Java"
},
{
"code": null,
"e": 47296,
"s": 47288,
"text": "Python3"
},
{
"code": null,
"e": 47299,
"s": 47296,
"text": "C#"
},
{
"code": null,
"e": 47303,
"s": 47299,
"text": "PHP"
},
{
"code": null,
"e": 47314,
"s": 47303,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff; int i, j; int* LMin = new int[(sizeof(int) * n)]; int* RMax = new int[(sizeof(int) * n)]; /* Construct LMin[] such that LMin[i] stores the minimum value from (arr[0], arr[1], ... arr[i]) */ LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); /* Construct RMax[] such that RMax[j] stores the maximum value from (arr[j], arr[j+1], ..arr[n-1]) */ RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); /* Traverse both arrays from left to right to find optimum j - i. This process is similar to merge() of MergeSort */ i = 0, j = 0, maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff;} // Driver Codeint main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << maxDiff; return 0;} // This code is contributed by rathbhupendra",
"e": 48619,
"s": 47314,
"text": null
},
{
"code": "#include <stdio.h> /* Utility Functions to get max and minimum of two integers */int max(int x, int y){ return x > y ? x : y;} int min(int x, int y){ return x < y ? x : y;} /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int maxDiff; int i, j; int* LMin = (int*)malloc(sizeof(int) * n); int* RMax = (int*)malloc(sizeof(int) * n); /* Construct LMin[] such that LMin[i] stores the minimum value from (arr[0], arr[1], ... arr[i]) */ LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); /* Construct RMax[] such that RMax[j] stores the maximum value from (arr[j], arr[j+1], ..arr[n-1]) */ RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); /* Traverse both arrays from left to right to find optimum j - i This process is similar to merge() of MergeSort */ i = 0, j = 0, maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff;} /* Driver program to test above functions */int main(){ int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); printf(\"\\n %d\", maxDiff); getchar(); return 0;}",
"e": 50053,
"s": 48619,
"text": null
},
{
"code": "class FindMaximum { /* Utility Functions to get max and minimum of two integers */ int max(int x, int y) { return x > y ? x : y; } int min(int x, int y) { return x < y ? x : y; } /* For a given array arr[], returns the maximum j-i such that arr[j] > arr[i] */ int maxIndexDiff(int arr[], int n) { int maxDiff; int i, j; int RMax[] = new int[n]; int LMin[] = new int[n]; /* Construct LMin[] such that LMin[i] stores the minimum value from (arr[0], arr[1], ... arr[i]) */ LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); /* Construct RMax[] such that RMax[j] stores the maximum value from (arr[j], arr[j+1], ..arr[n-1]) */ RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); /* Traverse both arrays from left to right to find optimum j - i This process is similar to merge() of MergeSort */ i = 0; j = 0; maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff; } /* Driver program to test the above functions */ public static void main(String[] args) { FindMaximum max = new FindMaximum(); int arr[] = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = arr.length; int maxDiff = max.maxIndexDiff(arr, n); System.out.println(maxDiff); }}",
"e": 51692,
"s": 50053,
"text": null
},
{
"code": "# Utility Functions to get max# and minimum of two integersdef max(a, b): if(a > b): return a else: return b def min(a, b): if(a < b): return a else: return b # For a given array arr[],# returns the maximum j - i# such that arr[j] > arr[i]def maxIndexDiff(arr, n): maxDiff = 0; LMin = [0] * n RMax = [0] * n # Construct LMin[] such that # LMin[i] stores the minimum # value from (arr[0], arr[1], # ... arr[i]) LMin[0] = arr[0] for i in range(1, n): LMin[i] = min(arr[i], LMin[i - 1]) # Construct RMax[] such that # RMax[j] stores the maximum # value from (arr[j], arr[j + 1], # ..arr[n-1]) RMax[n - 1] = arr[n - 1] for j in range(n - 2, -1, -1): RMax[j] = max(arr[j], RMax[j + 1]); # Traverse both arrays from left # to right to find optimum j - i # This process is similar to # merge() of MergeSort i, j = 0, 0 maxDiff = -1 while (j < n and i < n): if (LMin[i] <= RMax[j]): maxDiff = max(maxDiff, j - i) j = j + 1 else: i = i + 1 return maxDiff # Driver Codeif(__name__ == '__main__'): arr = [9, 2, 3, 4, 5, 6, 7, 8, 18, 0] n = len(arr) maxDiff = maxIndexDiff(arr, n) print (maxDiff) # This code is contributed# by gautam karakoti",
"e": 53020,
"s": 51692,
"text": null
},
{
"code": "// C# program to find the maximum// j β i such that arr[j] > arr[i]using System; class GFG { // Utility Functions to get max // and minimum of two integers static int max(int x, int y) { return x > y ? x : y; } static int min(int x, int y) { return x < y ? x : y; } // For a given array arr[], returns // the maximum j-i such thatarr[j] > arr[i] static int maxIndexDiff(int[] arr, int n) { int maxDiff; int i, j; int[] RMax = new int[n]; int[] LMin = new int[n]; // Construct LMin[] such that LMin[i] // stores the minimum value // from (arr[0], arr[1], ... arr[i]) LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); // Construct RMax[] such that // RMax[j] stores the maximum value // from (arr[j], arr[j+1], ..arr[n-1]) RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); // Traverse both arrays from left // to right to find optimum j - i // This process is similar to merge() // of MergeSort i = 0; j = 0; maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff; } // Driver program public static void Main() { int[] arr = { 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 }; int n = arr.Length; int maxDiff = maxIndexDiff(arr, n); Console.Write(maxDiff); }}// This Code is Contributed by Sam007",
"e": 54724,
"s": 53020,
"text": null
},
{
"code": "<?php// PHP program to find the maximum// j β i such that arr[j] > arr[i] // For a given array arr[],// returns the maximum j - i// such that arr[j] > arr[i]function maxIndexDiff($arr, $n){ $maxDiff = 0; $LMin = array_fill(0, $n, NULL); $RMax = array_fill(0, $n, NULL); // Construct LMin[] such that // LMin[i] stores the minimum // value from (arr[0], arr[1], // ... arr[i]) $LMin[0] = $arr[0]; for($i = 1; $i < $n; $i++) $LMin[$i] = min($arr[$i], $LMin[$i - 1]); // Construct RMax[] such that // RMax[j] stores the maximum // value from (arr[j], arr[j+1], // ..arr[n-1]) $RMax[$n - 1] = $arr[$n - 1]; for($j = $n - 2; $j >= 0; $j--) $RMax[$j] = max($arr[$j], $RMax[$j + 1]); // Traverse both arrays from left // to right to find optimum j - i // This process is similar to // merge() of MergeSort $i = 0; $j = 0; $maxDiff = -1; while ($j < $n && $i < $n) if ($LMin[$i] <= $RMax[$j]) { $maxDiff = max($maxDiff, $j - $i); $j = $j + 1; } else $i = $i + 1; return $maxDiff;} // Driver Code$arr = array(9, 2, 3, 4, 5, 6, 7, 8, 18, 0);$n = sizeof($arr);$maxDiff = maxIndexDiff($arr, $n);echo $maxDiff; // This code is contributed// by ChitraNayal?>",
"e": 56078,
"s": 54724,
"text": null
},
{
"code": "<script> // Javascript program to find the maximum // j β i such that arr[j] > arr[i] // Utility Functions to get max // and minimum of two integers function max(x, y) { return x > y ? x : y; } function min(x, y) { return x < y ? x : y; } // For a given array arr[], returns // the maximum j-i such thatarr[j] > arr[i] function maxIndexDiff(arr, n) { let maxDiff; let i, j; let RMax = new Array(n); let LMin = new Array(n); // Construct LMin[] such that LMin[i] // stores the minimum value // from (arr[0], arr[1], ... arr[i]) LMin[0] = arr[0]; for (i = 1; i < n; ++i) LMin[i] = min(arr[i], LMin[i - 1]); // Construct RMax[] such that // RMax[j] stores the maximum value // from (arr[j], arr[j+1], ..arr[n-1]) RMax[n - 1] = arr[n - 1]; for (j = n - 2; j >= 0; --j) RMax[j] = max(arr[j], RMax[j + 1]); // Traverse both arrays from left // to right to find optimum j - i // This process is similar to merge() // of MergeSort i = 0; j = 0; maxDiff = -1; while (j < n && i < n) { if (LMin[i] <= RMax[j]) { maxDiff = max(maxDiff, j - i); j = j + 1; } else i = i + 1; } return maxDiff; } let arr = [ 9, 2, 3, 4, 5, 6, 7, 8, 18, 0 ]; let n = arr.length; let maxDiff = maxIndexDiff(arr, n); document.write(maxDiff);</script>",
"e": 57658,
"s": 56078,
"text": null
},
{
"code": null,
"e": 57660,
"s": 57658,
"text": "8"
},
{
"code": null,
"e": 57705,
"s": 57660,
"text": "Time Complexity: O(n) Auxiliary Space: O(n) "
},
{
"code": null,
"e": 57823,
"s": 57705,
"text": "Please write comments if you find the above codes/algorithms incorrect, or find other ways to solve the same problem."
},
{
"code": null,
"e": 57988,
"s": 57823,
"text": "We consider an auxiliary array : rightMax[] , such that, rightMax[i] = max element of the subarray arr[i...(n-1)], the largest or equal element after arr[i] element"
},
{
"code": null,
"e": 58190,
"s": 57988,
"text": "Suppose (arr[i], arr[jLast] ) is a pair, such that arr[jLast] is the last greater or equal element than arr[i]. For the pairs ending with arr[jLast] : ( arr[k], arr[jLast] ) for all k = (i+1) to jLast"
},
{
"code": null,
"e": 58281,
"s": 58190,
"text": "we donβt need to consider (jLast β k) because (jLast β i ) > (jLast β k) for all such kβs."
},
{
"code": null,
"e": 58309,
"s": 58281,
"text": "So we can skip those pairs."
},
{
"code": null,
"e": 58530,
"s": 58309,
"text": "Traversing from left to right of both arrays : arr[] and rightMax[] , when we first encounter rightMax[j] < arr[i[ , we know that jLast = j-1, and we can skip the pairs (arr[k], arr[jLast]) for all k = (i+1) to jLast. "
},
{
"code": null,
"e": 58810,
"s": 58530,
"text": "And also rightMax[] is non increasing sequence , so all elements at right side of rightMax[j] is smaller than or equal to rightMax[j]. But there may be arr[x] after arr[i] (x > i) such that arr[x] < rightMax[j] for x > i, so increment i when rightMax[j] < arr[i] is encountered."
},
{
"code": null,
"e": 58814,
"s": 58810,
"text": "C++"
},
{
"code": null,
"e": 58819,
"s": 58814,
"text": "Java"
},
{
"code": null,
"e": 58827,
"s": 58819,
"text": "Python3"
},
{
"code": null,
"e": 58830,
"s": 58827,
"text": "C#"
},
{
"code": null,
"e": 58841,
"s": 58830,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int rightMax[n]; rightMax[n-1]= arr[n-1]; for(int i = n-2; i>=0; i--) rightMax[i] = max(rightMax[i+1] , arr[i]); //rightMax[i] = max{ arr[i...(n-1] } int maxDist = INT_MIN; int i = 0, j = 0; while(i<n && j<n) { if(rightMax[j] >= arr[i]) { maxDist = max( maxDist, j-i ); j++; } else // if(rightMax[j] < leftMin[i]) i++; } return maxDist; } // Driver Codeint main(){ int arr[] = { 34,8,10,3,2,80,30,33,1}; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << maxDiff; return 0;} // This code is contributed by Sourashis Mondal",
"e": 59795,
"s": 58841,
"text": null
},
{
"code": "import java.util.*; class GFG{ /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */ static int maxIndexDiff(int arr[], int n) { int []rightMax = new int[n]; rightMax[n-1]= arr[n-1]; for(int i = n-2; i>=0; i--) rightMax[i] = Math.max(rightMax[i+1] , arr[i]); // rightMax[i] = max{ arr[i...(n-1] } int maxDist = Integer.MIN_VALUE; int i = 0, j = 0; while(i < n && j < n) { if(rightMax[j] >= arr[i]) { maxDist = Math.max( maxDist, j-i ); j++; } else // if(rightMax[j] < leftMin[i]) i++; } return maxDist; } // Driver Code public static void main(String[] args) { int arr[] = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = arr.length; int maxDiff = maxIndexDiff(arr, n); System.out.print(maxDiff); }} // This code is contributed by Rajput-Ji",
"e": 60666,
"s": 59795,
"text": null
},
{
"code": "# For a given array arr[], returns the# maximum j β i such that arr[j] > arr[i]def maxIndexDiff(arr, n): rightMax = [0] * n rightMax[n - 1] = arr[n - 1] for i in range(n - 2, -1, -1): rightMax[i] = max(rightMax[i + 1], arr[i]) # rightMax[i] = max arr[i...(n-1] maxDist = -2**31 i = 0 j = 0 while (i < n and j < n): if (rightMax[j] >= arr[i]): maxDist = max(maxDist, j - i) j += 1 else: # if(rightMax[j] < leftMin[i]) i += 1 return maxDist # Driver Codearr = [ 34, 8, 10, 3, 2, 80, 30, 33, 1 ]n = len(arr)maxDiff = maxIndexDiff(arr, n) print(maxDiff) # This code is contributed by Shubham Singh",
"e": 61402,
"s": 60666,
"text": null
},
{
"code": "/* For a given array arr[], returns the maximum j β i such thatarr[j] > arr[i] */using System; public class GFG{ static int maxIndexDiff(int[] arr, int n) { int []rightMax = new int[n]; rightMax[n - 1] = arr[n - 1]; int i = 0, j = 0; for(i = n - 2; i >= 0; i--) rightMax[i] = Math.Max(rightMax[i+1] , arr[i]); // rightMax[i] = max{ arr[i...(n-1] } int maxDist = Int32.MinValue; i = 0; while(i < n && j < n) { if(rightMax[j] >= arr[i]) { maxDist = Math.Max( maxDist, j - i); j++; } else // if(rightMax[j] < leftMin[i]) i++; } return maxDist; } // Driver Code public static void Main() { int[] arr = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = arr.Length; int maxDiff = maxIndexDiff(arr, n); Console.Write(maxDiff); }} // This code is contributed by Shubham Singh",
"e": 62266,
"s": 61402,
"text": null
},
{
"code": "<script> /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */function maxIndexDiff(arr, n){ var rightMax = new Array(n).fill(0);; rightMax[n - 1] = arr[n - 1]; for(var i = n - 2; i >= 0; i--){ rightMax[i] = Math.max(rightMax[i+1] , arr[i]); } // rightMax[i] = max{ arr[i...(n-1] } var maxDist = Number.MIN_VALUE; var i = 0; var j = 0; while(i < n && j < n) { if(rightMax[j] >= arr[i]) { maxDist = Math.max( maxDist, j-i ); j++; } else // if(rightMax[j] < leftMin[i]) { i++; } } return maxDist; } // Driver Codevar arr = [ 34,8,10,3,2,80,30,33,1];var n = arr.length;var maxDiff = maxIndexDiff(arr, n);document.write(maxDiff); // This code is contributed by Shubham Singh </script>",
"e": 63100,
"s": 62266,
"text": null
},
{
"code": null,
"e": 63102,
"s": 63100,
"text": "6"
},
{
"code": null,
"e": 63209,
"s": 63102,
"text": "We can also do this using leftMin[] array only , where leftMin[i] = min element of the subarray arr[0...i]"
},
{
"code": null,
"e": 63213,
"s": 63209,
"text": "C++"
},
{
"code": null,
"e": 63218,
"s": 63213,
"text": "Java"
},
{
"code": null,
"e": 63226,
"s": 63218,
"text": "Python3"
},
{
"code": null,
"e": 63229,
"s": 63226,
"text": "C#"
},
{
"code": null,
"e": 63240,
"s": 63229,
"text": "Javascript"
},
{
"code": "#include <bits/stdc++.h>using namespace std; /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */int maxIndexDiff(int arr[], int n){ int leftMin[n] ; leftMin[0] = arr[0]; for(int i = 1 ; i<n; i++) leftMin[i] = min(leftMin[i-1], arr[i]); //leftMin[i] = min{ arr[0...i] } int maxDist = INT_MIN; int i = n-1, j = n-1; while(i>=0 && j>=0) { if(arr[j] >= leftMin[i]) { maxDist = max(maxDist, j-i); i--; } else j--; } return maxDist; } // Driver Codeint main(){ int arr[] = { 34,8,10,3,2,80,30,33,1}; int n = sizeof(arr) / sizeof(arr[0]); int maxDiff = maxIndexDiff(arr, n); cout << maxDiff; return 0;} // This code is contributed by Sourashis Mondal",
"e": 64050,
"s": 63240,
"text": null
},
{
"code": "import java.util.*;class GFG{ /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */ static int maxIndexDiff(int arr[], int n) { int []leftMin = new int[n]; leftMin[0] = arr[0]; for(int i = 1; i < n; i++) leftMin[i] = Math.min(leftMin[i - 1] , arr[i]); // leftMin[i] = min{ arr[i...(n-1] } int maxDist = Integer.MIN_VALUE; int i = n - 1, j = n - 1; while(i >= 0 && j >= 0) { if(arr[j] >= leftMin[i]) { maxDist = Math.max( maxDist, j - i ); i--; } else j--; } return maxDist; } // Driver Code public static void main(String[] args) { int arr[] = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = arr.length; int maxDiff = maxIndexDiff(arr, n); System.out.print(maxDiff); }} // This code is contributed by Shubham Singh",
"e": 64895,
"s": 64050,
"text": null
},
{
"code": "# For a given array arr[], # returns the maximum j β i such that# arr[j] > arr[i] */def maxIndexDiff(arr, n): leftMin = [0]*n leftMin[0] = arr[0] for i in range(1,n): leftMin[i] = min(leftMin[i-1], arr[i]) # leftMin[i] = min arr[0...i] maxDist = - 2**32 i = n-1 j = n-1 while(i>=0 and j>=0): if(arr[j] >= leftMin[i]): maxDist = max(maxDist, j-i) i-=1 else: j-=1 return maxDist # Driver Codearr = [34,8,10,3,2,80,30,33,1]n = len(arr)maxDiff = maxIndexDiff(arr, n)print(maxDiff) # This code is contributed by Shubham Singh",
"e": 65545,
"s": 64895,
"text": null
},
{
"code": "using System; public class GFG{ /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */ static int maxIndexDiff(int[] arr, int n) { int []leftMin = new int[n]; leftMin[0] = arr[0]; int i,j; for( i = 1; i < n; i++) leftMin[i] = Math.Min(leftMin[i - 1] , arr[i]); // leftMin[i] = min{ arr[i...(n-1] } int maxDist = Int32.MinValue; i = n - 1; j = n - 1; while(i >= 0 && j >= 0) { if(arr[j] >= leftMin[i]) { maxDist = Math.Max( maxDist, j - i ); i--; } else j--; } return maxDist; } // Driver Code static public void Main () { int[] arr = {34, 8, 10, 3, 2, 80, 30, 33, 1}; int n = arr.Length; int maxDiff = maxIndexDiff(arr, n); Console.Write(maxDiff); }} // This code is contributed by Shubham Singh",
"e": 66382,
"s": 65545,
"text": null
},
{
"code": "<script> /* For a given array arr[], returns the maximum j β i such that arr[j] > arr[i] */function maxIndexDiff(arr, n){ var leftMin = new Array(n).fill(0);; leftMin[0] = arr[0]; for(var i = 1; i < n; i++){ leftMin[i] = Math.min(leftMin[i-1] , arr[i]); } // leftMin[i] = min{ arr[i...(n-1] } var maxDist = Number.MIN_VALUE; var i = n-1; var j = n-1; while(i >= 0 && j >= 0) { if(arr[j] >= leftMin[i]) { maxDist = Math.max( maxDist, j-i ); i--; } else // if(rightMax[j] < leftMin[i]) { j--; } } return maxDist; } // Driver Codevar arr = [ 34,8,10,3,2,80,30,33,1];var n = arr.length;var maxDiff = maxIndexDiff(arr, n);document.write(maxDiff); // This code is contributed by Shubham Singh </script>",
"e": 67203,
"s": 66382,
"text": null
},
{
"code": null,
"e": 67205,
"s": 67203,
"text": "6"
},
{
"code": null,
"e": 67212,
"s": 67205,
"text": "Sam007"
},
{
"code": null,
"e": 67228,
"s": 67212,
"text": "Gautam Karakoti"
},
{
"code": null,
"e": 67234,
"s": 67228,
"text": "ukasp"
},
{
"code": null,
"e": 67247,
"s": 67234,
"text": "Akanksha_Rai"
},
{
"code": null,
"e": 67261,
"s": 67247,
"text": "rathbhupendra"
},
{
"code": null,
"e": 67265,
"s": 67261,
"text": "msg"
},
{
"code": null,
"e": 67276,
"s": 67265,
"text": "executable"
},
{
"code": null,
"e": 67293,
"s": 67276,
"text": "shikhasingrajput"
},
{
"code": null,
"e": 67303,
"s": 67293,
"text": "Rajput-Ji"
},
{
"code": null,
"e": 67318,
"s": 67303,
"text": "winter_soldier"
},
{
"code": null,
"e": 67339,
"s": 67318,
"text": "avanitrachhadiya2155"
},
{
"code": null,
"e": 67347,
"s": 67339,
"text": "rag2127"
},
{
"code": null,
"e": 67356,
"s": 67347,
"text": "mank1083"
},
{
"code": null,
"e": 67367,
"s": 67356,
"text": "decode2207"
},
{
"code": null,
"e": 67376,
"s": 67367,
"text": "suresh07"
},
{
"code": null,
"e": 67392,
"s": 67376,
"text": "abhijit gupta 1"
},
{
"code": null,
"e": 67407,
"s": 67392,
"text": "2011harshgupta"
},
{
"code": null,
"e": 67421,
"s": 67407,
"text": "dheerwani2498"
},
{
"code": null,
"e": 67433,
"s": 67421,
"text": "sourashis69"
},
{
"code": null,
"e": 67448,
"s": 67433,
"text": "SHUBHAMSINGH10"
},
{
"code": null,
"e": 67464,
"s": 67448,
"text": "simranarora5sos"
},
{
"code": null,
"e": 67477,
"s": 67464,
"text": "shinjanpatra"
},
{
"code": null,
"e": 67493,
"s": 67477,
"text": "pushpeshrajdx01"
},
{
"code": null,
"e": 67500,
"s": 67493,
"text": "Amazon"
},
{
"code": null,
"e": 67509,
"s": 67500,
"text": "Snapdeal"
},
{
"code": null,
"e": 67516,
"s": 67509,
"text": "Arrays"
},
{
"code": null,
"e": 67523,
"s": 67516,
"text": "Amazon"
},
{
"code": null,
"e": 67532,
"s": 67523,
"text": "Snapdeal"
},
{
"code": null,
"e": 67539,
"s": 67532,
"text": "Arrays"
},
{
"code": null,
"e": 67637,
"s": 67539,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 67646,
"s": 67637,
"text": "Comments"
},
{
"code": null,
"e": 67659,
"s": 67646,
"text": "Old Comments"
},
{
"code": null,
"e": 67682,
"s": 67659,
"text": "Introduction to Arrays"
},
{
"code": null,
"e": 67714,
"s": 67682,
"text": "Multidimensional Arrays in Java"
},
{
"code": null,
"e": 67759,
"s": 67714,
"text": "Python | Using 2D arrays/lists the right way"
},
{
"code": null,
"e": 67780,
"s": 67759,
"text": "Linked List vs Array"
},
{
"code": null,
"e": 67871,
"s": 67780,
"text": "Given an array of size n and a number k, find all elements that appear more than n/k times"
},
{
"code": null,
"e": 67925,
"s": 67871,
"text": "Queue | Set 1 (Introduction and Array Implementation)"
},
{
"code": null,
"e": 67952,
"s": 67925,
"text": "Subset Sum Problem | DP-25"
},
{
"code": null,
"e": 68008,
"s": 67952,
"text": "K'th Smallest/Largest Element in Unsorted Array | Set 1"
},
{
"code": null,
"e": 68032,
"s": 68008,
"text": "Find the Missing Number"
}
] |
IPGeoLocation - Find IP address information in Kali Linux - GeeksforGeeks
|
17 Jun, 2021
IPGeoLocation is a free and open-source tool written in Python available on GitHub which is used to find information about the IP address on your Kali Linux operating system. It is used to retrieve IP Geolocation information. The tool is powered by the IP-API, which is an API used to retrieve information about IP Geolocation. This tool is written in python, so you must have python3, termcolor, colorama modules installed in your system in order to use the tool. This tool selects a random proxy on the new line and the IP geolocation maps that IP using Google Map. You can export CSV, XML, and txt formats.
Step 1: Open your Kali Linux operating system. Use the following command to install the tool.
git clone https://github.com/maldevel/IPGeoLocation.git
Step 2: Use the following command to move in the directory of the tool.
cd IPGeoLocation
ls
Step 3: Now the tool has been downloaded. Now you have to install the requirements using the following commands.
pip3 install -r requirements.txt --user
Step 4: Now you have to give permission to the tool using the following command.
chmod +x ipgeolocation.py
Step 3: All the requirements have been installed. To run the tool use the following commands.
./ipgeolocation.py
The tool is running successfully. Now we will see examples.
Example 1: Use the IPGeoLocation tool to know your own IP information.
./ipgeolocation.py -m
Example 2: Use the IPGeoLocation tool to find out the geolocation information of a domain.
./ipgeolocation.py -t google.com
You can see that the tool gives all the geolocation information about the target. You can also use the geolocation tool to find your own target geolocation information.
Kali-Linux
Linux-Tools
Linux-Unix
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
Thread functions in C/C++
mv command in Linux with examples
nohup Command in Linux with Examples
scp command in Linux with Examples
Docker - COPY Instruction
chown command in Linux with Examples
nslookup command in Linux with Examples
SED command in Linux | Set 2
Named Pipe or FIFO with example C program
uniq Command in LINUX with examples
|
[
{
"code": null,
"e": 24040,
"s": 24012,
"text": "\n17 Jun, 2021"
},
{
"code": null,
"e": 24651,
"s": 24040,
"text": "IPGeoLocation is a free and open-source tool written in Python available on GitHub which is used to find information about the IP address on your Kali Linux operating system. It is used to retrieve IP Geolocation information. The tool is powered by the IP-API, which is an API used to retrieve information about IP Geolocation. This tool is written in python, so you must have python3, termcolor, colorama modules installed in your system in order to use the tool. This tool selects a random proxy on the new line and the IP geolocation maps that IP using Google Map. You can export CSV, XML, and txt formats."
},
{
"code": null,
"e": 24745,
"s": 24651,
"text": "Step 1: Open your Kali Linux operating system. Use the following command to install the tool."
},
{
"code": null,
"e": 24801,
"s": 24745,
"text": "git clone https://github.com/maldevel/IPGeoLocation.git"
},
{
"code": null,
"e": 24873,
"s": 24801,
"text": "Step 2: Use the following command to move in the directory of the tool."
},
{
"code": null,
"e": 24893,
"s": 24873,
"text": "cd IPGeoLocation\nls"
},
{
"code": null,
"e": 25006,
"s": 24893,
"text": "Step 3: Now the tool has been downloaded. Now you have to install the requirements using the following commands."
},
{
"code": null,
"e": 25046,
"s": 25006,
"text": "pip3 install -r requirements.txt --user"
},
{
"code": null,
"e": 25127,
"s": 25046,
"text": "Step 4: Now you have to give permission to the tool using the following command."
},
{
"code": null,
"e": 25153,
"s": 25127,
"text": "chmod +x ipgeolocation.py"
},
{
"code": null,
"e": 25247,
"s": 25153,
"text": "Step 3: All the requirements have been installed. To run the tool use the following commands."
},
{
"code": null,
"e": 25266,
"s": 25247,
"text": "./ipgeolocation.py"
},
{
"code": null,
"e": 25326,
"s": 25266,
"text": "The tool is running successfully. Now we will see examples."
},
{
"code": null,
"e": 25397,
"s": 25326,
"text": "Example 1: Use the IPGeoLocation tool to know your own IP information."
},
{
"code": null,
"e": 25420,
"s": 25397,
"text": "./ipgeolocation.py -m "
},
{
"code": null,
"e": 25511,
"s": 25420,
"text": "Example 2: Use the IPGeoLocation tool to find out the geolocation information of a domain."
},
{
"code": null,
"e": 25544,
"s": 25511,
"text": "./ipgeolocation.py -t google.com"
},
{
"code": null,
"e": 25713,
"s": 25544,
"text": "You can see that the tool gives all the geolocation information about the target. You can also use the geolocation tool to find your own target geolocation information."
},
{
"code": null,
"e": 25724,
"s": 25713,
"text": "Kali-Linux"
},
{
"code": null,
"e": 25736,
"s": 25724,
"text": "Linux-Tools"
},
{
"code": null,
"e": 25747,
"s": 25736,
"text": "Linux-Unix"
},
{
"code": null,
"e": 25845,
"s": 25747,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 25854,
"s": 25845,
"text": "Comments"
},
{
"code": null,
"e": 25867,
"s": 25854,
"text": "Old Comments"
},
{
"code": null,
"e": 25893,
"s": 25867,
"text": "Thread functions in C/C++"
},
{
"code": null,
"e": 25927,
"s": 25893,
"text": "mv command in Linux with examples"
},
{
"code": null,
"e": 25964,
"s": 25927,
"text": "nohup Command in Linux with Examples"
},
{
"code": null,
"e": 25999,
"s": 25964,
"text": "scp command in Linux with Examples"
},
{
"code": null,
"e": 26025,
"s": 25999,
"text": "Docker - COPY Instruction"
},
{
"code": null,
"e": 26062,
"s": 26025,
"text": "chown command in Linux with Examples"
},
{
"code": null,
"e": 26102,
"s": 26062,
"text": "nslookup command in Linux with Examples"
},
{
"code": null,
"e": 26131,
"s": 26102,
"text": "SED command in Linux | Set 2"
},
{
"code": null,
"e": 26173,
"s": 26131,
"text": "Named Pipe or FIFO with example C program"
}
] |
How to create sloping lines using CSS? - GeeksforGeeks
|
01 Oct, 2020
Prerequisite- CSS
These sloping lines are easily implemented using background-image property in CSS,
Normal Colored Diagonal Stripes: Here, the diagonal stripes constructed using repeating-linear-gradient() in CSS.Example:<!DOCTYPE html><html ><head> <meta charset="utf-8"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of sloping line*/ 45deg, /*First Color of the Stripe*/ #fff, #fff 10px, /*Second Color of the Stripe*/ #aed581 10px, #aed581 20px );}</style> </head> <body> <div class="module"> <h2 class="stripe-1">GFG</h2> </div></body></html>Output:
Normal Colored Diagonal Stripes: Here, the diagonal stripes constructed using repeating-linear-gradient() in CSS.
Example:
<!DOCTYPE html><html ><head> <meta charset="utf-8"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of sloping line*/ 45deg, /*First Color of the Stripe*/ #fff, #fff 10px, /*Second Color of the Stripe*/ #aed581 10px, #aed581 20px );}</style> </head> <body> <div class="module"> <h2 class="stripe-1">GFG</h2> </div></body></html>
Output:
Gradient Diagonal Stripes: This makes half the stripes totally transparent using repeating-linear-gradient(), it can appear as if the stripes have gradients. Example:<!DOCTYPE html><html ><head> <meta charset="utf-8"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of the slope line*/ 45deg, /*To make The stripe transparent at the end*/ transparent, transparent 10px, #ccc 10px, #ccc 20px ), linear-gradient( to bottom, /*Color of the Stripe*/ green, green );}</style> </head> <body> <div class="module"> <h2 class="stripe-1">GFG</h2> </div></body></html>Output:
Gradient Diagonal Stripes: This makes half the stripes totally transparent using repeating-linear-gradient(), it can appear as if the stripes have gradients.
Example:
<!DOCTYPE html><html ><head> <meta charset="utf-8"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of the slope line*/ 45deg, /*To make The stripe transparent at the end*/ transparent, transparent 10px, #ccc 10px, #ccc 20px ), linear-gradient( to bottom, /*Color of the Stripe*/ green, green );}</style> </head> <body> <div class="module"> <h2 class="stripe-1">GFG</h2> </div></body></html>
Output:
Radial Stripes: These stripes are implemented using radial gradients can be use under repeating-linear-gradients().Example:<!DOCTYPE html><html ><head> <meta charset="utf-8"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-radial-gradient( /*Shape of the repeating lines*/ circle, /*First Color of the stripe*/ green, green 10px, /*Second Color of the stripe*/ #aed581 10px, #aed581 20px); }</style> </head> <body> <div class="module"> <h2 class="stripe-1">GFG</h2> </div></body></html>Output:
Radial Stripes: These stripes are implemented using radial gradients can be use under repeating-linear-gradients().
Example:
<!DOCTYPE html><html ><head> <meta charset="utf-8"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-radial-gradient( /*Shape of the repeating lines*/ circle, /*First Color of the stripe*/ green, green 10px, /*Second Color of the stripe*/ #aed581 10px, #aed581 20px); }</style> </head> <body> <div class="module"> <h2 class="stripe-1">GFG</h2> </div></body></html>
Output:
CSS-Misc
CSS
Web Technologies
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Comments
Old Comments
Design a web page using HTML and CSS
Form validation using jQuery
Search Bar using HTML, CSS and JavaScript
How to set space between the flexbox ?
How to Create Time-Table schedule using HTML ?
Roadmap to Become a Web Developer in 2022
Installation of Node.js on Linux
How to fetch data from an API in ReactJS ?
Top 10 Angular Libraries For Web Developers
Convert a string to an integer in JavaScript
|
[
{
"code": null,
"e": 25320,
"s": 25292,
"text": "\n01 Oct, 2020"
},
{
"code": null,
"e": 25338,
"s": 25320,
"text": "Prerequisite- CSS"
},
{
"code": null,
"e": 25421,
"s": 25338,
"text": "These sloping lines are easily implemented using background-image property in CSS,"
},
{
"code": null,
"e": 26100,
"s": 25421,
"text": "Normal Colored Diagonal Stripes: Here, the diagonal stripes constructed using repeating-linear-gradient() in CSS.Example:<!DOCTYPE html><html ><head> <meta charset=\"utf-8\"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of sloping line*/ 45deg, /*First Color of the Stripe*/ #fff, #fff 10px, /*Second Color of the Stripe*/ #aed581 10px, #aed581 20px );}</style> </head> <body> <div class=\"module\"> <h2 class=\"stripe-1\">GFG</h2> </div></body></html>Output:"
},
{
"code": null,
"e": 26214,
"s": 26100,
"text": "Normal Colored Diagonal Stripes: Here, the diagonal stripes constructed using repeating-linear-gradient() in CSS."
},
{
"code": null,
"e": 26223,
"s": 26214,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html ><head> <meta charset=\"utf-8\"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of sloping line*/ 45deg, /*First Color of the Stripe*/ #fff, #fff 10px, /*Second Color of the Stripe*/ #aed581 10px, #aed581 20px );}</style> </head> <body> <div class=\"module\"> <h2 class=\"stripe-1\">GFG</h2> </div></body></html>",
"e": 26774,
"s": 26223,
"text": null
},
{
"code": null,
"e": 26782,
"s": 26774,
"text": "Output:"
},
{
"code": null,
"e": 27606,
"s": 26782,
"text": "Gradient Diagonal Stripes: This makes half the stripes totally transparent using repeating-linear-gradient(), it can appear as if the stripes have gradients. Example:<!DOCTYPE html><html ><head> <meta charset=\"utf-8\"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of the slope line*/ 45deg, /*To make The stripe transparent at the end*/ transparent, transparent 10px, #ccc 10px, #ccc 20px ), linear-gradient( to bottom, /*Color of the Stripe*/ green, green );}</style> </head> <body> <div class=\"module\"> <h2 class=\"stripe-1\">GFG</h2> </div></body></html>Output:"
},
{
"code": null,
"e": 27765,
"s": 27606,
"text": "Gradient Diagonal Stripes: This makes half the stripes totally transparent using repeating-linear-gradient(), it can appear as if the stripes have gradients. "
},
{
"code": null,
"e": 27774,
"s": 27765,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html ><head> <meta charset=\"utf-8\"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-linear-gradient( /*Angle of the slope line*/ 45deg, /*To make The stripe transparent at the end*/ transparent, transparent 10px, #ccc 10px, #ccc 20px ), linear-gradient( to bottom, /*Color of the Stripe*/ green, green );}</style> </head> <body> <div class=\"module\"> <h2 class=\"stripe-1\">GFG</h2> </div></body></html>",
"e": 28425,
"s": 27774,
"text": null
},
{
"code": null,
"e": 28433,
"s": 28425,
"text": "Output:"
},
{
"code": null,
"e": 29117,
"s": 28433,
"text": "Radial Stripes: These stripes are implemented using radial gradients can be use under repeating-linear-gradients().Example:<!DOCTYPE html><html ><head> <meta charset=\"utf-8\"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-radial-gradient( /*Shape of the repeating lines*/ circle, /*First Color of the stripe*/ green, green 10px, /*Second Color of the stripe*/ #aed581 10px, #aed581 20px); }</style> </head> <body> <div class=\"module\"> <h2 class=\"stripe-1\">GFG</h2> </div></body></html>Output:"
},
{
"code": null,
"e": 29233,
"s": 29117,
"text": "Radial Stripes: These stripes are implemented using radial gradients can be use under repeating-linear-gradients()."
},
{
"code": null,
"e": 29242,
"s": 29233,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html ><head> <meta charset=\"utf-8\"> <style> .module { background: white; border: 1px solid #ccc; margin: 3%; width: 40%; > h2 { padding: 1rem; margin: 0 0 0.5rem 0; } } .stripe-1 { color: white; background: repeating-radial-gradient( /*Shape of the repeating lines*/ circle, /*First Color of the stripe*/ green, green 10px, /*Second Color of the stripe*/ #aed581 10px, #aed581 20px); }</style> </head> <body> <div class=\"module\"> <h2 class=\"stripe-1\">GFG</h2> </div></body></html>",
"e": 29796,
"s": 29242,
"text": null
},
{
"code": null,
"e": 29804,
"s": 29796,
"text": "Output:"
},
{
"code": null,
"e": 29813,
"s": 29804,
"text": "CSS-Misc"
},
{
"code": null,
"e": 29817,
"s": 29813,
"text": "CSS"
},
{
"code": null,
"e": 29834,
"s": 29817,
"text": "Web Technologies"
},
{
"code": null,
"e": 29932,
"s": 29834,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 29941,
"s": 29932,
"text": "Comments"
},
{
"code": null,
"e": 29954,
"s": 29941,
"text": "Old Comments"
},
{
"code": null,
"e": 29991,
"s": 29954,
"text": "Design a web page using HTML and CSS"
},
{
"code": null,
"e": 30020,
"s": 29991,
"text": "Form validation using jQuery"
},
{
"code": null,
"e": 30062,
"s": 30020,
"text": "Search Bar using HTML, CSS and JavaScript"
},
{
"code": null,
"e": 30101,
"s": 30062,
"text": "How to set space between the flexbox ?"
},
{
"code": null,
"e": 30148,
"s": 30101,
"text": "How to Create Time-Table schedule using HTML ?"
},
{
"code": null,
"e": 30190,
"s": 30148,
"text": "Roadmap to Become a Web Developer in 2022"
},
{
"code": null,
"e": 30223,
"s": 30190,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 30266,
"s": 30223,
"text": "How to fetch data from an API in ReactJS ?"
},
{
"code": null,
"e": 30310,
"s": 30266,
"text": "Top 10 Angular Libraries For Web Developers"
}
] |
DAX Text - CONCATENATE function
|
Joins two text strings into one text string.
CONCATENATE (<text1>, <text2>)
The text strings to be joined into a single text string.
Text strings can be
Text, or
Numbers, or
Column references.
The concatenated string.
DAX CONCATENATE function accepts only two arguments.
If you need to concatenate multiple columns, you can either nest the CONCATENATE functions or use the text concatenation operator (&) to join all of them.
If you want to use text strings directly, rather than using column references, you must enclose each text string in double quotation marks.
= CONCATENATE ("John ","Lever") returns John Lever.
= CONCATENATE (LEFT ([Product], 5), [No. of Units])
returns a calculated column with values β left 5 characters of Product concatenated with No. of Units in the corresponding row.
53 Lectures
5.5 hours
Abhay Gadiya
24 Lectures
2 hours
Randy Minder
26 Lectures
4.5 hours
Randy Minder
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2046,
"s": 2001,
"text": "Joins two text strings into one text string."
},
{
"code": null,
"e": 2079,
"s": 2046,
"text": "CONCATENATE (<text1>, <text2>) \n"
},
{
"code": null,
"e": 2136,
"s": 2079,
"text": "The text strings to be joined into a single text string."
},
{
"code": null,
"e": 2156,
"s": 2136,
"text": "Text strings can be"
},
{
"code": null,
"e": 2165,
"s": 2156,
"text": "Text, or"
},
{
"code": null,
"e": 2177,
"s": 2165,
"text": "Numbers, or"
},
{
"code": null,
"e": 2196,
"s": 2177,
"text": "Column references."
},
{
"code": null,
"e": 2221,
"s": 2196,
"text": "The concatenated string."
},
{
"code": null,
"e": 2274,
"s": 2221,
"text": "DAX CONCATENATE function accepts only two arguments."
},
{
"code": null,
"e": 2429,
"s": 2274,
"text": "If you need to concatenate multiple columns, you can either nest the CONCATENATE functions or use the text concatenation operator (&) to join all of them."
},
{
"code": null,
"e": 2569,
"s": 2429,
"text": "If you want to use text strings directly, rather than using column references, you must enclose each text string in double quotation marks."
},
{
"code": null,
"e": 2673,
"s": 2569,
"text": "= CONCATENATE (\"John \",\"Lever\") returns John Lever.\n= CONCATENATE (LEFT ([Product], 5), [No. of Units])"
},
{
"code": null,
"e": 2801,
"s": 2673,
"text": "returns a calculated column with values β left 5 characters of Product concatenated with No. of Units in the corresponding row."
},
{
"code": null,
"e": 2836,
"s": 2801,
"text": "\n 53 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 2850,
"s": 2836,
"text": " Abhay Gadiya"
},
{
"code": null,
"e": 2883,
"s": 2850,
"text": "\n 24 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 2897,
"s": 2883,
"text": " Randy Minder"
},
{
"code": null,
"e": 2932,
"s": 2897,
"text": "\n 26 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 2946,
"s": 2932,
"text": " Randy Minder"
},
{
"code": null,
"e": 2953,
"s": 2946,
"text": " Print"
},
{
"code": null,
"e": 2964,
"s": 2953,
"text": " Add Notes"
}
] |
Binary search in C#
|
Binary search works on a sorted array. The value is compared with the middle element of the array. If equality is not found, then the half part is eliminated in which the value is not there. In the same way, the other half part is searched.
Here is the mid element in our array. Letβs say we need to find 62, then the left part would be eliminated and the right part is then searched β
These are the complexities of a binary search β
Let us see the method to implement the binary search β
public static object BinarySearchDisplay(int[] arr, int key) {
int minNum = 0;
int maxNum = arr.Length - 1;
while (minNum <=maxNum) {
int mid = (minNum + maxNum) / 2;
if (key == arr[mid]) {
return ++mid;
} else if (key < arr[mid]) {
max = mid - 1;
}else {
min = mid + 1;
}
}
return "None";
}
|
[
{
"code": null,
"e": 1303,
"s": 1062,
"text": "Binary search works on a sorted array. The value is compared with the middle element of the array. If equality is not found, then the half part is eliminated in which the value is not there. In the same way, the other half part is searched."
},
{
"code": null,
"e": 1448,
"s": 1303,
"text": "Here is the mid element in our array. Letβs say we need to find 62, then the left part would be eliminated and the right part is then searched β"
},
{
"code": null,
"e": 1496,
"s": 1448,
"text": "These are the complexities of a binary search β"
},
{
"code": null,
"e": 1551,
"s": 1496,
"text": "Let us see the method to implement the binary search β"
},
{
"code": null,
"e": 1916,
"s": 1551,
"text": "public static object BinarySearchDisplay(int[] arr, int key) {\n int minNum = 0;\n int maxNum = arr.Length - 1;\n\n while (minNum <=maxNum) {\n int mid = (minNum + maxNum) / 2;\n if (key == arr[mid]) {\n return ++mid;\n } else if (key < arr[mid]) {\n max = mid - 1;\n }else {\n min = mid + 1;\n }\n }\n return \"None\";\n}"
}
] |
SVG visibility Attribute - GeeksforGeeks
|
31 Mar, 2022
The visibility attribute allows you to control the visibility of graphical elements. It has effect only on the following elements <a>, <altGlyph>, <audio>, <canvas>, <circle>, <ellipse>, <foreignObject>, <iframe>, <image>, <line>, <path>, <polygon>, <polyline>, <rect>, <text>, <textPath>, <tref>, <tspan>, <video>
Syntax:
visibility = visible | hidden | collapse
Attribute Values: The visibility attribute accepts the values mentioned above and described below
visible: This value indicates that the element will be visible.
hidden: This value indicates that the element will not be visible.
collapse: This value is equivalent to hidden.
Example 1: Below is the example that illustrates the use of the visibility attribute.
HTML
Output:
Example 2: Below is the example that illustrates the use of the visibility attribute.
HTML
<!DOCTYPE html><html> <body> <h1 style="color:green; font-size:60px;"> GeeksforGeeks </h1> <svg viewBox="0 0 620 520" xmlns="http://www.w3.org/2000/svg"> <rect x="10" y="10" width="200" height="100" stroke="green" stroke-width="5" fill="transparent" /> <g stroke="seagreen" stroke-width="5" fill="lightgreen"> <rect x="20" y="20" width="80" height="80" visibility="hidden" /> <rect x="120" y="20" width="80" height="80" visibility="visible" /> </g> </svg> </body> </html>
Output:
Attention reader! Donβt stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course.
HTML-SVG
SVG-Attribute
HTML
Web Technologies
HTML
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Top 10 Projects For Beginners To Practice HTML and CSS Skills
How to insert spaces/tabs in text using HTML/CSS?
How to set the default value for an HTML <select> element ?
How to update Node.js and NPM to next version ?
How to set input type date in dd-mm-yyyy format using HTML ?
Roadmap to Become a Web Developer in 2022
Installation of Node.js on Linux
Top 10 Projects For Beginners To Practice HTML and CSS Skills
How to fetch data from an API in ReactJS ?
Convert a string to an integer in JavaScript
|
[
{
"code": null,
"e": 28654,
"s": 28626,
"text": "\n31 Mar, 2022"
},
{
"code": null,
"e": 28969,
"s": 28654,
"text": "The visibility attribute allows you to control the visibility of graphical elements. It has effect only on the following elements <a>, <altGlyph>, <audio>, <canvas>, <circle>, <ellipse>, <foreignObject>, <iframe>, <image>, <line>, <path>, <polygon>, <polyline>, <rect>, <text>, <textPath>, <tref>, <tspan>, <video>"
},
{
"code": null,
"e": 28977,
"s": 28969,
"text": "Syntax:"
},
{
"code": null,
"e": 29019,
"s": 28977,
"text": "visibility = visible | hidden | collapse\n"
},
{
"code": null,
"e": 29117,
"s": 29019,
"text": "Attribute Values: The visibility attribute accepts the values mentioned above and described below"
},
{
"code": null,
"e": 29181,
"s": 29117,
"text": "visible: This value indicates that the element will be visible."
},
{
"code": null,
"e": 29248,
"s": 29181,
"text": "hidden: This value indicates that the element will not be visible."
},
{
"code": null,
"e": 29294,
"s": 29248,
"text": "collapse: This value is equivalent to hidden."
},
{
"code": null,
"e": 29380,
"s": 29294,
"text": "Example 1: Below is the example that illustrates the use of the visibility attribute."
},
{
"code": null,
"e": 29385,
"s": 29380,
"text": "HTML"
},
{
"code": null,
"e": 29395,
"s": 29387,
"text": "Output:"
},
{
"code": null,
"e": 29481,
"s": 29395,
"text": "Example 2: Below is the example that illustrates the use of the visibility attribute."
},
{
"code": null,
"e": 29486,
"s": 29481,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html> <body> <h1 style=\"color:green; font-size:60px;\"> GeeksforGeeks </h1> <svg viewBox=\"0 0 620 520\" xmlns=\"http://www.w3.org/2000/svg\"> <rect x=\"10\" y=\"10\" width=\"200\" height=\"100\" stroke=\"green\" stroke-width=\"5\" fill=\"transparent\" /> <g stroke=\"seagreen\" stroke-width=\"5\" fill=\"lightgreen\"> <rect x=\"20\" y=\"20\" width=\"80\" height=\"80\" visibility=\"hidden\" /> <rect x=\"120\" y=\"20\" width=\"80\" height=\"80\" visibility=\"visible\" /> </g> </svg> </body> </html>",
"e": 30141,
"s": 29486,
"text": null
},
{
"code": null,
"e": 30149,
"s": 30141,
"text": "Output:"
},
{
"code": null,
"e": 30286,
"s": 30149,
"text": "Attention reader! Donβt stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course."
},
{
"code": null,
"e": 30295,
"s": 30286,
"text": "HTML-SVG"
},
{
"code": null,
"e": 30309,
"s": 30295,
"text": "SVG-Attribute"
},
{
"code": null,
"e": 30314,
"s": 30309,
"text": "HTML"
},
{
"code": null,
"e": 30331,
"s": 30314,
"text": "Web Technologies"
},
{
"code": null,
"e": 30336,
"s": 30331,
"text": "HTML"
},
{
"code": null,
"e": 30434,
"s": 30336,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30496,
"s": 30434,
"text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills"
},
{
"code": null,
"e": 30546,
"s": 30496,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
},
{
"code": null,
"e": 30606,
"s": 30546,
"text": "How to set the default value for an HTML <select> element ?"
},
{
"code": null,
"e": 30654,
"s": 30606,
"text": "How to update Node.js and NPM to next version ?"
},
{
"code": null,
"e": 30715,
"s": 30654,
"text": "How to set input type date in dd-mm-yyyy format using HTML ?"
},
{
"code": null,
"e": 30757,
"s": 30715,
"text": "Roadmap to Become a Web Developer in 2022"
},
{
"code": null,
"e": 30790,
"s": 30757,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 30852,
"s": 30790,
"text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills"
},
{
"code": null,
"e": 30895,
"s": 30852,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
Spring AOP - Annotation Based Before Advice
|
@Before is an advice type which ensures that an advice runs before the method execution. Following is the syntax of @Before advice.
@Pointcut("execution(* com.tutorialspoint.Student.getName(..))")
private void selectGetName(){}
@Before("selectGetName()")
public void beforeAdvice(){
System.out.println("Going to setup student profile.");
}
Where,
@Pointcut β Mark a function as a Pointcut
@Pointcut β Mark a function as a Pointcut
execution( expression ) β Expression covering methods on which advice is to be applied.
execution( expression ) β Expression covering methods on which advice is to be applied.
@Before β Mark a function as an advice to be executed before method(s) covered by Pointcut.
@Before β Mark a function as an advice to be executed before method(s) covered by Pointcut.
To understand the above-mentioned concepts related to @Before Advice, let us write an example which will implement @Before Advice. To write our example with few advices, let us have a working Eclipse IDE in place and use the following steps to create a Spring application β
Following is the content of Logging.java file. This is actually a sample of aspect module, which defines the methods to be called at various points.
package com.tutorialspoint;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.annotation.Before;
@Aspect
public class Logging {
/** Following is the definition for a Pointcut to select
* all the methods available. So advice will be called
* for all the methods.
*/
@Pointcut("execution(* com.tutorialspoint.Student.getName(..))")
private void selectGetName(){}
/**
* This is the method which I would like to execute
* before a selected method execution.
*/
@Before("selectGetName()")
public void beforeAdvice(){
System.out.println("Going to setup student profile.");
}
}
Following is the content of the Student.java file.
package com.tutorialspoint;
public class Student {
private Integer age;
private String name;
public void setAge(Integer age) {
this.age = age;
}
public Integer getAge() {
System.out.println("Age : " + age );
return age;
}
public void setName(String name) {
this.name = name;
}
public String getName() {
System.out.println("Name : " + name );
return name;
}
public void printThrowException(){
System.out.println("Exception raised");
throw new IllegalArgumentException();
}
}
Following is the content of the MainApp.java file.
package com.tutorialspoint;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
public class MainApp {
public static void main(String[] args) {
ApplicationContext context = new ClassPathXmlApplicationContext("Beans.xml");
Student student = (Student) context.getBean("student");
student.getName();
student.getAge();
}
}
Following is the configuration file Beans.xml.
<?xml version = "1.0" encoding = "UTF-8"?>
<beans xmlns = "http://www.springframework.org/schema/beans"
xmlns:xsi = "http://www.w3.org/2001/XMLSchema-instance"
xmlns:aop = "http://www.springframework.org/schema/aop"
xsi:schemaLocation = "http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop-3.0.xsd ">
<aop:aspectj-autoproxy/>
<!-- Definition for student bean -->
<bean id = "student" class = "com.tutorialspoint.Student">
<property name = "name" value = "Zara" />
<property name = "age" value = "11"/>
</bean>
<!-- Definition for logging aspect -->
<bean id = "logging" class = "com.tutorialspoint.Logging"/>
</beans>
Once you are done creating the source and configuration files, run your application. Rightclick on MainApp.java in your application and use run as Java Application command. If everything is fine with your application, it will print the following message.
Going to setup student profile.
Name : Zara
Age : 11
The above-defined @Pointcut uses an expression to select method getAge() defined in class(es) under the package com.tutorialspoint. @After advice uses the above-defined Pointcut as a parameter. Effectively afterAdvice() method will be called before every method covered by the above Pointcut.
Print
Add Notes
Bookmark this page
|
[
{
"code": null,
"e": 2401,
"s": 2269,
"text": "@Before is an advice type which ensures that an advice runs before the method execution. Following is the syntax of @Before advice."
},
{
"code": null,
"e": 2614,
"s": 2401,
"text": "@Pointcut(\"execution(* com.tutorialspoint.Student.getName(..))\")\nprivate void selectGetName(){}\n\n@Before(\"selectGetName()\")\npublic void beforeAdvice(){\n System.out.println(\"Going to setup student profile.\");\n} "
},
{
"code": null,
"e": 2621,
"s": 2614,
"text": "Where,"
},
{
"code": null,
"e": 2663,
"s": 2621,
"text": "@Pointcut β Mark a function as a Pointcut"
},
{
"code": null,
"e": 2705,
"s": 2663,
"text": "@Pointcut β Mark a function as a Pointcut"
},
{
"code": null,
"e": 2793,
"s": 2705,
"text": "execution( expression ) β Expression covering methods on which advice is to be applied."
},
{
"code": null,
"e": 2881,
"s": 2793,
"text": "execution( expression ) β Expression covering methods on which advice is to be applied."
},
{
"code": null,
"e": 2973,
"s": 2881,
"text": "@Before β Mark a function as an advice to be executed before method(s) covered by Pointcut."
},
{
"code": null,
"e": 3065,
"s": 2973,
"text": "@Before β Mark a function as an advice to be executed before method(s) covered by Pointcut."
},
{
"code": null,
"e": 3339,
"s": 3065,
"text": "To understand the above-mentioned concepts related to @Before Advice, let us write an example which will implement @Before Advice. To write our example with few advices, let us have a working Eclipse IDE in place and use the following steps to create a Spring application β"
},
{
"code": null,
"e": 3488,
"s": 3339,
"text": "Following is the content of Logging.java file. This is actually a sample of aspect module, which defines the methods to be called at various points."
},
{
"code": null,
"e": 4186,
"s": 3488,
"text": "package com.tutorialspoint;\n\nimport org.aspectj.lang.annotation.Aspect;\nimport org.aspectj.lang.annotation.Pointcut;\nimport org.aspectj.lang.annotation.Before;\n\n@Aspect\npublic class Logging {\n\n /** Following is the definition for a Pointcut to select\n * all the methods available. So advice will be called\n * for all the methods.\n */\n @Pointcut(\"execution(* com.tutorialspoint.Student.getName(..))\")\n private void selectGetName(){}\n /** \n * This is the method which I would like to execute\n * before a selected method execution.\n */\n @Before(\"selectGetName()\")\n public void beforeAdvice(){\n System.out.println(\"Going to setup student profile.\");\n } \n}"
},
{
"code": null,
"e": 4237,
"s": 4186,
"text": "Following is the content of the Student.java file."
},
{
"code": null,
"e": 4797,
"s": 4237,
"text": "package com.tutorialspoint;\n\npublic class Student {\n private Integer age;\n private String name;\n\n public void setAge(Integer age) {\n this.age = age;\n }\n public Integer getAge() {\n System.out.println(\"Age : \" + age );\n return age;\n }\n public void setName(String name) {\n this.name = name;\n }\n public String getName() {\n System.out.println(\"Name : \" + name );\n return name;\n }\n public void printThrowException(){\n System.out.println(\"Exception raised\");\n throw new IllegalArgumentException();\n }\n}"
},
{
"code": null,
"e": 4848,
"s": 4797,
"text": "Following is the content of the MainApp.java file."
},
{
"code": null,
"e": 5283,
"s": 4848,
"text": "package com.tutorialspoint;\n\nimport org.springframework.context.ApplicationContext;\nimport org.springframework.context.support.ClassPathXmlApplicationContext;\n\npublic class MainApp {\n public static void main(String[] args) {\n ApplicationContext context = new ClassPathXmlApplicationContext(\"Beans.xml\");\n\n Student student = (Student) context.getBean(\"student\");\n student.getName();\n student.getAge(); \n }\n}"
},
{
"code": null,
"e": 5330,
"s": 5283,
"text": "Following is the configuration file Beans.xml."
},
{
"code": null,
"e": 6163,
"s": 5330,
"text": "<?xml version = \"1.0\" encoding = \"UTF-8\"?>\n<beans xmlns = \"http://www.springframework.org/schema/beans\"\n xmlns:xsi = \"http://www.w3.org/2001/XMLSchema-instance\" \n xmlns:aop = \"http://www.springframework.org/schema/aop\"\n xsi:schemaLocation = \"http://www.springframework.org/schema/beans\n http://www.springframework.org/schema/beans/spring-beans-3.0.xsd \n http://www.springframework.org/schema/aop \n http://www.springframework.org/schema/aop/spring-aop-3.0.xsd \">\n\n <aop:aspectj-autoproxy/>\n\n <!-- Definition for student bean -->\n <bean id = \"student\" class = \"com.tutorialspoint.Student\">\n <property name = \"name\" value = \"Zara\" />\n <property name = \"age\" value = \"11\"/> \n </bean>\n\n <!-- Definition for logging aspect -->\n <bean id = \"logging\" class = \"com.tutorialspoint.Logging\"/> \n</beans>"
},
{
"code": null,
"e": 6418,
"s": 6163,
"text": "Once you are done creating the source and configuration files, run your application. Rightclick on MainApp.java in your application and use run as Java Application command. If everything is fine with your application, it will print the following message."
},
{
"code": null,
"e": 6472,
"s": 6418,
"text": "Going to setup student profile.\nName : Zara\nAge : 11\n"
},
{
"code": null,
"e": 6765,
"s": 6472,
"text": "The above-defined @Pointcut uses an expression to select method getAge() defined in class(es) under the package com.tutorialspoint. @After advice uses the above-defined Pointcut as a parameter. Effectively afterAdvice() method will be called before every method covered by the above Pointcut."
},
{
"code": null,
"e": 6772,
"s": 6765,
"text": " Print"
},
{
"code": null,
"e": 6783,
"s": 6772,
"text": " Add Notes"
}
] |
Check if a Matrix is Symmetric or not in R Programming - isSymmetric() Function - GeeksforGeeks
|
16 Jun, 2020
isSymmetric() function in R Language is used to check if a matrix is a symmetric matrix. A Symmetric matrix is one whose transpose is equal to the matrix itself.
Syntax: isSymmetric(x)
Parameters:x: Matrix to be checked
Example 1:
# R program to check if # a matrix is symmetric # Creating a diagonal matrixx1 <- diag(3) # Creating a matrixx2 <- matrix(c(1, 2, 2, 3), 2) # Calling isSymmetric() functionisSymmetric(x1)isSymmetric(x2)
Output:
[1] TRUE
[1] TRUE
Example 2:
# R program to check if # a matrix is symmetric # Creating a matrixx1 <- matrix(c(1:9), 3, 3) # Calling isSymmetric() functionisSymmetric(x1)
Output:
[1] FALSE
R Matrix-Function
R Language
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Change Color of Bars in Barchart using ggplot2 in R
How to Change Axis Scales in R Plots?
Group by function in R using Dplyr
How to Split Column Into Multiple Columns in R DataFrame?
How to filter R DataFrame by values in a column?
How to import an Excel File into R ?
How to filter R dataframe by multiple conditions?
Replace Specific Characters in String in R
R - if statement
Time Series Analysis in R
|
[
{
"code": null,
"e": 25242,
"s": 25214,
"text": "\n16 Jun, 2020"
},
{
"code": null,
"e": 25404,
"s": 25242,
"text": "isSymmetric() function in R Language is used to check if a matrix is a symmetric matrix. A Symmetric matrix is one whose transpose is equal to the matrix itself."
},
{
"code": null,
"e": 25427,
"s": 25404,
"text": "Syntax: isSymmetric(x)"
},
{
"code": null,
"e": 25462,
"s": 25427,
"text": "Parameters:x: Matrix to be checked"
},
{
"code": null,
"e": 25473,
"s": 25462,
"text": "Example 1:"
},
{
"code": "# R program to check if # a matrix is symmetric # Creating a diagonal matrixx1 <- diag(3) # Creating a matrixx2 <- matrix(c(1, 2, 2, 3), 2) # Calling isSymmetric() functionisSymmetric(x1)isSymmetric(x2)",
"e": 25679,
"s": 25473,
"text": null
},
{
"code": null,
"e": 25687,
"s": 25679,
"text": "Output:"
},
{
"code": null,
"e": 25706,
"s": 25687,
"text": "[1] TRUE\n[1] TRUE\n"
},
{
"code": null,
"e": 25717,
"s": 25706,
"text": "Example 2:"
},
{
"code": "# R program to check if # a matrix is symmetric # Creating a matrixx1 <- matrix(c(1:9), 3, 3) # Calling isSymmetric() functionisSymmetric(x1)",
"e": 25861,
"s": 25717,
"text": null
},
{
"code": null,
"e": 25869,
"s": 25861,
"text": "Output:"
},
{
"code": null,
"e": 25880,
"s": 25869,
"text": "[1] FALSE\n"
},
{
"code": null,
"e": 25898,
"s": 25880,
"text": "R Matrix-Function"
},
{
"code": null,
"e": 25909,
"s": 25898,
"text": "R Language"
},
{
"code": null,
"e": 26007,
"s": 25909,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26059,
"s": 26007,
"text": "Change Color of Bars in Barchart using ggplot2 in R"
},
{
"code": null,
"e": 26097,
"s": 26059,
"text": "How to Change Axis Scales in R Plots?"
},
{
"code": null,
"e": 26132,
"s": 26097,
"text": "Group by function in R using Dplyr"
},
{
"code": null,
"e": 26190,
"s": 26132,
"text": "How to Split Column Into Multiple Columns in R DataFrame?"
},
{
"code": null,
"e": 26239,
"s": 26190,
"text": "How to filter R DataFrame by values in a column?"
},
{
"code": null,
"e": 26276,
"s": 26239,
"text": "How to import an Excel File into R ?"
},
{
"code": null,
"e": 26326,
"s": 26276,
"text": "How to filter R dataframe by multiple conditions?"
},
{
"code": null,
"e": 26369,
"s": 26326,
"text": "Replace Specific Characters in String in R"
},
{
"code": null,
"e": 26386,
"s": 26369,
"text": "R - if statement"
}
] |
Set PIN validation with JavaScript?
|
You can validate pin on the basis of length and type of pin must be string etc.
Following is the code β
function simpleValidationForPin(pinValues) {
if (!(typeof pinValues === "string" && !~pinValues.indexOf('.') && !isNaN(Number(pinValues)) && (pinValues.length === 2 || pinValues.length === 4))) {
return false;
} else {
return true;
}
}
if (simpleValidationForPin("0000.00") == true)
console.log("This is a valid pin")
else
console.log("This is not a valid pin")
if (simpleValidationForPin(66) == true)
console.log("This is a valid pin")
else
console.log("This is not valid pin")
if (simpleValidationForPin("4444") == true)
console.log("This is a valid pin")
else
console.log("This is not a valid pin")
if (simpleValidationForPin("66") == true)
console.log("This is a valid pin")
else
console.log("This is not valid pin")
if (simpleValidationForPin("666") == true)
console.log("This is a valid pin")
else
console.log("This is not a valid pin")
To run the above program, you need to use the following command β
node fileName.js.
Here, my file name is demo254.js.
This will produce the following output on console β
PS C:\Users\Amit\javascript-code> node demo254.js
This is not a valid pin
This is not a valid pin
This is a valid pin
This is a valid pin
This is not a valid pin
|
[
{
"code": null,
"e": 1142,
"s": 1062,
"text": "You can validate pin on the basis of length and type of pin must be string etc."
},
{
"code": null,
"e": 1166,
"s": 1142,
"text": "Following is the code β"
},
{
"code": null,
"e": 2060,
"s": 1166,
"text": "function simpleValidationForPin(pinValues) {\n if (!(typeof pinValues === \"string\" && !~pinValues.indexOf('.') && !isNaN(Number(pinValues)) && (pinValues.length === 2 || pinValues.length === 4))) {\n return false;\n } else {\n return true;\n }\n}\nif (simpleValidationForPin(\"0000.00\") == true)\n console.log(\"This is a valid pin\")\nelse\n console.log(\"This is not a valid pin\")\nif (simpleValidationForPin(66) == true)\n console.log(\"This is a valid pin\")\nelse\n console.log(\"This is not valid pin\")\nif (simpleValidationForPin(\"4444\") == true)\n console.log(\"This is a valid pin\")\nelse\n console.log(\"This is not a valid pin\")\nif (simpleValidationForPin(\"66\") == true)\n console.log(\"This is a valid pin\")\nelse\n console.log(\"This is not valid pin\")\nif (simpleValidationForPin(\"666\") == true)\n console.log(\"This is a valid pin\")\nelse\n console.log(\"This is not a valid pin\")"
},
{
"code": null,
"e": 2126,
"s": 2060,
"text": "To run the above program, you need to use the following command β"
},
{
"code": null,
"e": 2144,
"s": 2126,
"text": "node fileName.js."
},
{
"code": null,
"e": 2178,
"s": 2144,
"text": "Here, my file name is demo254.js."
},
{
"code": null,
"e": 2230,
"s": 2178,
"text": "This will produce the following output on console β"
},
{
"code": null,
"e": 2392,
"s": 2230,
"text": "PS C:\\Users\\Amit\\javascript-code> node demo254.js\nThis is not a valid pin\nThis is not a valid pin\nThis is a valid pin\nThis is a valid pin\nThis is not a valid pin"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.