serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
501
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
/* One (x, y) sample of the data set the program fits a line to. */
typedef struct point_t {
double x;
double y;
} point_t;
/* Number of samples in data[] below (host-side copy). */
int n_data = 1000;
/* Device-side copy of the sample count.
 * NOTE(review): not read by d_rms_error below — the kernels rely on the
 * launch configuration matching the array size instead; confirm intent. */
__device__ int d_n_data = 1000;
point_t data[] = {
{76.80,141.84},{73.91,133.16},{65.59,135.84},{77.08,144.27},
{83.32,166.24},{72.64,139.99},{69.42,137.04},{82.61,146.08},
{73.55,125.13},{68.93,133.75},{65.26,120.97},{78.19,141.47},
{ 1.86,40.06},{69.95,122.74},{ 2.32,35.00},{53.17,120.38},
{29.55,74.36},{73.51,119.81},{73.29,129.87},{99.93,182.89},
{28.58,80.09},{98.15,165.02},{87.87,154.79},{52.68,90.48},
{95.88,175.41},{85.56,155.25},{70.52,118.85},{ 2.72,20.43},
{58.10,100.25},{62.74,118.35},{18.29,50.38},{15.05,60.11},
{22.10,47.04},{25.33,65.98},{65.87,128.00},{51.66,127.14},
{79.95,133.20},{38.12,88.95},{98.50,159.87},{21.00,52.36},
{43.80,91.36},{85.22,138.76},{39.41,89.16},{15.74,26.40},
{67.15,108.89},{37.24,88.37},{35.35,67.93},{91.42,158.11},
{46.60,82.54},{37.68,85.78},{55.62,113.91},{ 2.02,21.36},
{84.91,166.55},{ 8.85,35.27},{ 6.00,35.70},{98.54,172.74},
{33.24,67.28},{15.37,54.73},{81.85,138.13},{13.21,35.51},
{18.19,39.85},{74.19,133.01},{84.49,162.54},{90.24,167.41},
{61.38,121.57},{20.98,61.98},{29.03,76.72},{53.11,110.71},
{38.99,82.43},{59.75,101.30},{25.68,53.90},{34.02,67.91},
{84.81,131.19},{77.47,145.16},{58.10,92.30},{56.57,94.05},
{74.41,158.89},{53.02,107.64},{23.68,77.01},{48.88,102.20},
{83.06,143.91},{15.93,61.90},{27.01,59.22},{78.96,134.04},
{75.43,127.47},{94.50,158.75},{40.92,78.41},{91.71,151.42},
{ 1.97,41.60},{11.47,45.38},{54.42,114.94},{80.83,150.03},
{30.04,64.02},{44.17,94.37},{10.27,43.22},{88.84,139.06},
{33.72,89.85},{97.14,172.86},{75.24,136.22},{58.14,130.17},
{71.66,146.04},{39.01,85.49},{12.53,74.58},{19.86,59.84},
{90.36,162.15},{42.05,85.60},{11.34,46.50},{38.25,82.33},
{56.03,102.81},{79.53,152.62},{45.92,73.14},{73.10,120.38},
{38.44,69.17},{ 3.18,46.09},{89.02,151.21},{79.64,140.20},
{59.32,115.04},{ 4.82,28.94},{22.23,90.79},{78.46,119.28},
{94.31,160.98},{76.89,141.23},{ 5.95,29.48},{67.27,133.32},
{44.10,89.40},{69.11,137.57},{79.19,151.24},{30.05,67.92},
{52.81,128.68},{65.71,116.48},{79.58,134.68},{56.10,103.72},
{25.96,87.41},{99.04,171.89},{55.01,100.03},{52.79,107.16},
{79.91,144.45},{32.81,65.02},{73.50,118.30},{64.85,123.67},
{67.87,114.03},{37.47,82.32},{13.30,56.88},{26.45,57.69},
{83.68,125.67},{ 2.12,39.21},{ 6.53,35.89},{76.61,118.93},
{11.18,29.40},{33.59,70.34},{49.78,110.01},{ 4.33,36.51},
{62.64,126.51},{17.96,64.00},{36.71,66.56},{88.59,159.96},
{95.07,175.93},{74.10,142.50},{74.76,135.49},{10.21,48.66},
{25.75,85.88},{50.01,94.95},{39.80,93.54},{14.44,62.55},
{79.73,147.16},{29.17,65.58},{18.52,52.66},{54.73,100.30},
{56.56,89.64},{87.15,137.47},{37.12,74.12},{62.75,120.94},
{60.14,110.71},{95.94,170.44},{66.99,137.00},{31.35,85.48},
{79.39,130.65},{40.20,91.54},{68.85,136.78},{16.09,58.59},
{39.57,77.44},{88.74,164.96},{51.84,74.76},{14.10,52.03},
{66.02,117.94},{ 8.71,49.97},{87.28,144.57},{34.63,63.25},
{80.07,154.95},{67.92,127.80},{ 1.57,38.91},{12.79,50.94},
{19.52,53.00},{68.04,127.41},{20.66,60.34},{48.99,117.19},
{20.29,60.77},{64.41,123.36},{52.94,101.32},{29.32,63.73},
{86.66,154.85},{73.95,127.20},{88.87,152.73},{80.97,146.15},
{53.59,100.46},{92.23,150.49},{61.22,120.55},{46.66,107.47},
{70.35,133.38},{77.13,146.97},{15.05,47.88},{15.43,63.59},
{60.54,131.30},{45.81,87.73},{76.11,144.77},{39.78,84.86},
{18.05,38.73},{96.55,179.51},{13.75,56.30},{71.24,133.26},
{ 6.04,48.27},{21.18,46.80},{53.76,123.66},{82.45,125.86},
{18.49,53.38},{10.93,58.21},{79.28,134.70},{90.84,163.49},
{88.23,157.72},{10.24,37.48},{ 4.06,34.97},{52.32,110.39},
{30.49,63.88},{32.90,77.32},{80.03,135.88},{ 7.99,39.79},
{46.58,75.04},{68.28,118.04},{36.46,79.32},{57.91,100.57},
{42.31,97.60},{73.06,135.84},{26.16,74.49},{58.33,122.36},
{21.83,59.63},{90.91,167.94},{67.31,103.49},{83.28,151.87},
{18.74,52.50},{25.28,87.07},{ 0.04,48.99},{15.70,57.91},
{69.08,122.75},{61.44,130.76},{99.28,170.25},{ 4.70,44.28},
{21.01,51.11},{83.12,148.84},{94.96,171.58},{52.57,102.65},
{73.17,141.20},{52.02,108.60},{89.72,160.15},{18.17,55.31},
{37.16,79.58},{85.51,165.97},{13.61,62.15},{50.21,115.56},
{37.08,71.23},{61.61,114.52},{50.45,91.25},{62.31,107.83},
{89.71,143.58},{24.52,50.59},{68.68,131.27},{64.42,129.75},
{15.32,50.66},{31.93,68.03},{73.46,139.28},{ 3.37,27.10},
{49.84,109.19},{15.24,52.48},{63.01,128.75},{87.87,163.91},
{72.28,129.27},{55.87,113.20},{50.08,98.45},{88.77,156.30},
{40.90,90.24},{52.45,121.75},{34.18,75.42},{ 2.08,41.22},
{97.76,164.01},{49.10,97.53},{ 5.78,58.18},{50.77,92.78},
{29.77,74.05},{57.32,95.04},{62.64,127.56},{58.64,115.55},
{39.39,109.48},{ 4.66,47.66},{16.72,56.61},{92.34,145.17},
{42.98,105.02},{85.37,144.96},{81.34,150.80},{69.35,113.25},
{13.61,55.21},{64.56,129.05},{99.87,174.79},{91.63,164.57},
{23.05,91.57},{ 5.46,43.28},{27.43,84.68},{52.33,90.64},
{20.48,69.31},{78.49,157.01},{99.77,179.69},{62.42,123.76},
{58.35,118.29},{14.99,70.97},{62.30,121.40},{22.72,60.52},
{99.76,161.94},{38.45,70.05},{97.83,166.09},{57.61,134.00},
{36.54,80.11},{88.36,165.33},{29.18,83.77},{57.23,108.37},
{72.49,135.62},{ 3.47,38.93},{65.63,129.64},{90.85,167.02},
{87.52,172.65},{ 4.62,37.46},{18.33,43.25},{75.19,153.75},
{45.61,100.25},{85.86,163.44},{55.67,111.10},{25.74,79.05},
{68.37,123.11},{28.28,69.28},{38.78,98.75},{41.30,74.09},
{ 8.75,51.61},{77.69,125.88},{32.13,65.51},{58.65,108.48},
{89.71,150.18},{47.96,93.88},{51.00,80.92},{46.89,103.89},
{46.26,96.89},{13.87,35.50},{49.68,82.47},{84.04,140.36},
{37.19,76.46},{ 5.07,56.07},{86.56,149.09},{92.96,159.47},
{40.03,82.41},{ 2.90,13.57},{49.34,98.62},{ 3.27,32.40},
{11.55,37.57},{97.95,159.99},{57.72,108.86},{57.86,110.39},
{98.70,169.60},{88.71,148.15},{19.49,65.21},{54.49,101.01},
{19.52,58.02},{46.56,79.03},{31.47,63.96},{61.20,128.64},
{40.12,94.46},{46.43,96.10},{95.94,161.45},{ 6.65,38.08},
{ 0.43,36.11},{20.73,67.54},{38.92,99.40},{86.38,161.23},
{66.40,123.71},{93.10,158.11},{99.87,171.41},{52.58,94.12},
{98.77,172.28},{96.98,177.97},{38.77,71.09},{81.98,138.21},
{95.55,158.03},{94.06,159.42},{73.09,136.27},{90.48,180.71},
{48.31,90.76},{19.54,72.85},{92.72,164.87},{13.27,36.49},
{ 6.85,33.02},{15.48,57.51},{ 1.16,13.57},{88.43,161.05},
{86.72,151.66},{63.94,112.18},{ 1.25,24.67},{74.26,138.29},
{ 1.10,29.32},{91.18,142.29},{38.38,92.64},{26.63,67.12},
{72.40,139.89},{ 8.29,31.60},{ 0.02,39.77},{91.48,151.26},
{42.17,86.16},{26.42,43.92},{40.27,91.64},{10.38,51.42},
{20.00,54.18},{78.75,145.54},{12.44,47.88},{95.58,176.01},
{27.10,66.61},{20.58,71.93},{97.79,156.01},{11.65,64.15},
{59.69,122.96},{35.39,81.41},{22.81,50.30},{16.16,46.29},
{84.75,142.39},{46.08,74.86},{25.67,52.99},{97.77,155.99},
{87.77,160.64},{33.83,67.16},{37.26,85.91},{74.81,128.92},
{68.78,132.78},{ 3.84,35.74},{21.67,53.12},{89.23,163.96},
{80.66,156.05},{ 2.80,31.53},{33.31,45.40},{41.13,87.83},
{23.59,74.18},{24.78,61.40},{78.06,125.39},{23.63,67.79},
{97.24,163.05},{57.61,92.44},{99.91,182.09},{81.92,142.72},
{ 3.80,39.87},{22.59,62.84},{40.81,89.25},{54.14,103.07},
{75.21,113.13},{49.96,95.61},{67.06,129.33},{55.40,87.85},
{31.59,75.65},{48.21,96.10},{41.34,99.65},{56.25,106.02},
{ 9.52,53.66},{70.69,131.01},{47.96,107.16},{18.06,52.70},
{20.40,43.03},{79.46,158.10},{22.82,68.78},{84.27,158.87},
{ 7.56,48.96},{21.12,68.79},{39.89,84.94},{86.02,147.43},
{14.47,64.44},{90.07,154.50},{63.38,133.42},{37.80,76.64},
{68.66,130.16},{62.35,131.18},{14.86,43.80},{ 6.96,17.52},
{16.70,50.42},{ 9.81,27.11},{12.19,36.12},{44.33,78.86},
{31.61,82.77},{97.48,168.20},{10.81,27.75},{13.75,56.21},
{34.29,80.84},{43.69,105.87},{54.68,108.96},{79.73,147.53},
{61.62,128.04},{73.20,127.82},{36.97,87.76},{12.32,58.22},
{34.46,100.48},{22.89,59.72},{84.91,151.54},{43.43,96.84},
{51.08,113.87},{92.00,143.99},{76.91,123.46},{45.28,88.12},
{27.89,79.00},{ 4.47,55.66},{25.29,66.38},{88.23,154.76},
{48.29,97.80},{73.62,116.98},{79.61,137.75},{86.57,154.09},
{67.17,129.19},{25.80,70.83},{87.25,161.52},{64.78,127.78},
{67.09,130.55},{85.80,135.92},{46.81,87.55},{71.45,149.02},
{75.36,137.01},{30.13,73.87},{ 7.97,45.84},{66.93,135.67},
{ 6.84,52.61},{63.42,119.19},{33.74,78.18},{ 6.98,39.25},
{98.47,171.90},{28.73,66.90},{94.63,157.45},{95.85,170.74},
{31.42,77.86},{10.33,43.96},{ 7.50,28.74},{85.43,160.97},
{72.92,120.06},{70.63,141.20},{89.19,154.32},{ 1.28,49.29},
{13.59,46.03},{61.11,125.53},{ 5.27,64.32},{19.77,44.45},
{95.49,158.30},{10.00,39.59},{97.35,181.66},{96.40,159.11},
{25.14,69.61},{89.18,141.99},{90.52,154.82},{69.02,143.17},
{72.48,135.19},{87.45,149.80},{97.18,163.59},{30.97,68.55},
{20.60,72.67},{47.12,94.02},{51.85,96.36},{23.80,78.13},
{87.26,150.01},{14.46,59.40},{99.77,144.05},{46.96,88.39},
{58.25,109.93},{85.37,147.30},{23.46,90.32},{98.69,171.96},
{16.95,46.18},{42.41,101.69},{10.42,59.19},{75.26,126.84},
{30.39,81.77},{37.02,93.26},{58.49,110.09},{89.10,162.93},
{68.61,132.29},{76.17,144.98},{45.37,91.14},{39.45,89.34},
{63.16,129.10},{19.58,53.00},{23.00,64.87},{88.56,157.52},
{80.32,141.54},{55.62,115.72},{49.44,109.66},{98.69,175.29},
{88.65,166.47},{59.01,127.46},{34.62,73.17},{41.17,99.55},
{87.75,147.26},{94.03,156.18},{55.08,108.49},{98.89,173.47},
{49.82,90.69},{87.73,160.65},{16.47,46.46},{41.34,79.62},
{83.15,166.44},{14.92,57.61},{21.80,67.82},{37.69,69.32},
{49.33,86.80},{90.91,147.04},{93.07,149.61},{25.44,59.18},
{17.22,49.18},{28.17,72.65},{ 0.77,38.97},{90.87,163.43},
{74.63,137.34},{16.55,49.30},{ 1.12,35.94},{91.42,163.41},
{ 7.28,48.60},{43.66,104.54},{ 2.20,40.26},{63.34,124.06},
{14.44,41.91},{21.21,88.98},{13.05,38.15},{90.07,165.55},
{14.23,59.03},{97.65,177.44},{52.59,89.72},{79.61,144.27},
{30.57,63.58},{99.86,169.58},{14.72,51.55},{31.54,70.10},
{59.28,109.68},{99.01,155.79},{ 4.13,26.79},{74.04,116.03},
{70.44,139.98},{64.71,123.78},{ 5.33,42.21},{71.19,126.62},
{50.18,98.86},{ 2.53,39.51},{23.81,77.92},{40.89,81.47},
{98.40,187.24},{39.88,73.90},{39.42,76.83},{30.46,75.54},
{59.20,109.15},{89.00,145.34},{46.42,88.82},{32.54,72.77},
{ 4.00,45.27},{ 4.85,30.22},{81.77,135.31},{ 0.16,30.49},
{67.78,133.13},{ 0.90,25.09},{58.59,118.38},{15.94,58.65},
{14.91,46.73},{43.82,89.21},{16.87,46.15},{43.14,96.83},
{ 6.28,27.61},{47.25,99.92},{ 4.17,57.60},{90.64,166.35},
{91.91,170.54},{ 8.13,34.07},{76.90,154.01},{12.52,41.40},
{95.64,176.97},{95.90,168.69},{88.69,167.66},{48.93,105.62},
{79.17,139.57},{67.41,107.70},{61.38,117.56},{89.48,166.48},
{19.16,57.11},{66.62,133.08},{44.79,102.21},{16.93,63.03},
{ 8.98,39.98},{66.95,123.43},{53.25,116.97},{93.25,163.17},
{ 1.37,32.85},{ 2.97,34.85},{80.87,150.60},{ 0.78,41.96},
{72.69,143.99},{26.02,85.06},{75.36,139.16},{85.18,162.42},
{36.34,73.88},{ 8.84,34.15},{84.81,148.96},{78.96,137.06},
{92.35,178.55},{54.26,127.97},{78.63,131.07},{59.43,105.79},
{52.22,96.59},{26.93,59.49},{50.87,91.55},{45.79,94.03},
{ 6.65,28.84},{56.94,103.37},{81.17,150.08},{35.22,80.75},
{25.29,67.81},{45.85,94.53},{88.97,170.12},{83.69,126.64},
{87.32,142.75},{95.98,184.02},{91.57,173.77},{31.69,64.55},
{ 3.54,23.12},{50.07,94.48},{18.35,47.95},{30.13,68.41},
{68.27,105.85},{93.84,164.65},{59.83,123.21},{11.37,48.82},
{16.11,42.53},{43.48,97.29},{46.11,93.28},{15.92,54.20},
{47.99,82.39},{52.76,92.39},{54.61,98.69},{26.05,62.64},
{ 2.70,27.78},{45.88,101.97},{69.70,133.74},{93.08,148.81},
{94.21,145.15},{26.78,87.99},{39.36,75.81},{62.67,103.44},
{60.39,105.91},{31.61,91.69},{46.66,102.22},{40.21,71.78},
{17.32,59.38},{89.24,159.24},{ 8.69,37.85},{41.27,94.31},
{92.40,160.41},{13.84,42.44},{90.70,156.55},{ 0.42,24.58},
{16.73,57.77},{98.89,164.23},{50.47,87.52},{61.55,99.37},
{66.83,139.43},{97.54,179.55},{78.85,130.58},{50.54,91.24},
{29.76,72.61},{76.44,150.84},{17.98,50.71},{60.01,128.80},
{86.74,135.73},{23.03,79.65},{90.98,148.41},{32.64,66.55},
{88.30,137.91},{72.69,131.75},{78.37,138.56},{ 3.06,46.75},
{47.35,94.38},{86.94,155.23},{56.80,110.40},{27.56,54.63},
{17.18,65.78},{88.88,160.44},{94.22,139.98},{38.53,89.02},
{65.36,112.75},{80.71,133.50},{15.96,42.45},{48.83,95.69},
{73.66,129.33},{45.90,98.06},{ 6.36,41.17},{ 7.74,32.66},
{ 9.30,42.57},{90.82,137.41},{19.67,52.81},{22.39,51.17},
{42.95,93.53},{65.18,116.03},{41.10,71.11},{ 8.09,29.31},
{84.62,146.49},{29.68,80.89},{50.05,97.61},{81.14,135.28},
{15.61,47.81},{98.10,186.60},{39.06,87.72},{80.94,131.21},
{15.49,33.59},{36.01,82.96},{20.29,78.53},{64.39,98.31},
{70.45,114.03},{50.06,104.96},{97.71,173.93},{67.51,126.77},
{27.84,68.02},{68.61,115.91},{94.33,163.94},{81.11,153.84},
{78.52,153.73},{51.69,126.17},{19.24,50.87},{27.23,75.02},
{17.33,62.66},{59.72,139.84},{36.70,80.89},{47.17,89.34},
{ 9.61,45.28},{45.38,84.42},{70.09,125.18},{27.52,78.87},
{12.20,36.42},{89.21,147.16},{44.13,91.63},{99.17,166.39},
{94.87,160.37},{24.21,75.30},{23.41,49.17},{62.28,109.53},
{13.91,49.57},{25.50,66.32},{63.04,121.17},{38.17,74.32},
{28.15,79.85},{77.84,157.44},{50.06,117.94},{88.97,164.45},
{58.29,121.06},{30.98,76.85},{54.15,108.46},{46.74,115.39},
{28.18,70.58},{98.37,157.20},{82.66,133.94},{34.16,79.28},
{71.70,139.93},{ 9.66,38.94},{20.02,70.45},{83.99,164.25},
{57.41,91.87},{93.45,161.27},{15.09,52.25},{46.67,104.19},
{15.83,48.09},{56.40,115.31},{75.99,129.90},{71.95,137.67},
{62.19,125.27},{64.79,128.82},{40.04,71.35},{37.52,78.35},
{57.41,110.12},{59.51,113.76},{82.35,155.78},{68.11,115.06},
{63.82,135.64},{79.09,132.29},{31.90,68.73},{86.51,140.48},
{94.15,165.22},{25.25,68.16},{85.44,148.52},{42.71,76.69},
{35.97,61.23},{64.06,114.99},{63.34,123.75},{45.82,103.23},
{45.00,91.90},{ 5.05,31.45},{79.00,131.76},{37.62,72.79},
{54.83,98.22},{ 2.45,42.63},{87.14,144.97},{16.61,58.22},
{25.40,67.97},{52.02,109.33},{94.70,165.30},{24.56,69.39},
{26.65,95.29},{20.21,74.69},{32.51,93.53},{77.67,150.18},
{ 7.97,53.99},{17.95,45.32},{14.08,44.40},{97.68,172.42},
{81.04,157.46},{67.94,124.06},{15.28,61.69},{65.24,111.24},
{ 9.81,47.35},{53.35,105.71},{51.27,116.77},{92.44,176.67},
{92.75,157.71},{96.63,170.59},{50.96,102.10},{12.59,56.64},
{87.99,154.97},{53.27,104.83},{89.34,156.25},{89.43,144.96},
{ 4.31,29.94},{38.53,76.07},{71.29,126.18},{48.55,98.93},
{75.68,134.51},{43.97,100.37},{49.42,94.90},{ 3.19,46.01},
{45.93,84.87},{55.20,99.30},{52.74,104.53},{65.60,126.25},
{ 1.83,30.62},{78.75,147.10},{44.84,90.34},{94.01,165.47},
{12.81,46.00},{ 3.20,46.31},{92.04,165.41},{24.39,70.09},
{76.21,145.59},{42.07,99.74},{ 7.83,32.08},{98.32,168.32},
{59.36,126.16},{63.97,128.90},{46.78,97.92},{ 6.73,29.83},
{19.71,40.05},{33.58,73.65},{95.76,177.24},{15.76,35.10},
{ 5.13,57.23},{80.36,145.85},{81.75,164.69},{ 1.42,38.61},
{49.30,97.65},{13.35,36.82},{27.95,63.49},{92.39,172.97},
{69.59,122.40},{79.07,153.47},{83.63,162.86},{37.18,88.83},
{69.71,134.76},{57.08,95.74},{88.42,154.68},{79.00,152.84},
{85.75,142.50},{57.33,108.36},{44.82,93.00},{56.97,102.79},
{36.56,73.41},{66.46,112.74},{ 4.01,59.76},{75.72,144.06},
{89.60,175.98},{90.10,153.07},{16.49,51.91},{87.96,128.17},
{31.01,67.42},{ 5.77,45.91},{ 2.92,34.29},{68.82,132.71}
};
/* Squared residual of the sample (x = r, y = a) against the line y = m*x + c. */
double residual_error(double r, double a, double m, double c) {
    double predicted = (m * r) + c;
    double diff = predicted - a;
    return diff * diff;
}
/* Device-side squared residual of sample (r, a) against the line y = m*r + c.
 * Mirrors the host residual_error() above so host and device agree. */
__device__ double d_residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
/* Root-mean-square error of the line y = m*x + c over the global data[]
 * table of n_data samples (host-side reference computation). */
double rms_error(double m, double c) {
    double total = 0.0;
    for (int idx = 0; idx < n_data; idx++) {
        total += residual_error(data[idx].x, data[idx].y, m, c);
    }
    return sqrt(total / n_data);
}
/* Kernel: one thread per sample. Writes the squared residual of sample i
 * for the candidate line (*m, *c) into error_sum_arr[i]; the host sums the
 * array afterwards to form the RMS error.
 * NOTE(review): there is no i < n bounds guard — correctness relies on the
 * <<<100,10>>> launch (1000 threads) exactly matching the 1000-element
 * data/error arrays; re-check if either size ever changes. */
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
/* Stores finish - start, in nanoseconds, into *difference.
 * Returns 0 when the elapsed time is positive, 1 otherwise (error flag). */
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int sec_delta = finish->tv_sec - start->tv_sec;
    long long int nsec_delta = finish->tv_nsec - start->tv_nsec;
    /* Borrow one second when the nanosecond field wrapped. */
    if (nsec_delta < 0) {
        sec_delta -= 1;
        nsec_delta += 1000000000;
    }
    *difference = sec_delta * 1000000000LL + nsec_delta;
    return (*difference > 0) ? 0 : 1;
}
/*
 * Hill-climbing search for the least-squares line y = m*x + c over data[].
 * Each pass evaluates, on the GPU, the RMS error of the eight (m, c)
 * candidates one step away from the current best and moves to the best
 * candidate until no neighbour improves on the current error.
 *
 * Fixes over the original:
 *  - error_sum_total was read before ever being initialised (undefined
 *    behaviour on the first candidate of each pass);
 *  - a leftover duplicated block re-added h_error_sum_arr[i] and recomputed
 *    e[i] AFTER the best-error test, so best_error_i could be stale (or
 *    never set) when the state advanced;
 *  - the invariant data[] table is now copied to the device once, before
 *    the loop, instead of on every pass;
 *  - the kernel launch status is now checked with cudaGetLastError().
 */
int main(){
    int i;
    double bm = 1.3;                 /* current best gradient */
    double bc = 10;                  /* current best intercept */
    double be;                       /* RMS error of (bm, bc) */
    double dm[8];                    /* candidate gradients this pass */
    double dc[8];                    /* candidate intercepts this pass */
    double e[8];                     /* RMS error of each candidate */
    double step = 0.01;
    double best_error = 999999999;
    int best_error_i;
    int minimum_found = 0;
    /* Offsets of the eight neighbouring candidates around (bm, bc). */
    double om[] = {0,1,1, 1, 0,-1,-1,-1};
    double oc[] = {1,1,0,-1,-1,-1, 0, 1};
    struct timespec start, finish;
    long long int time_elapsed;

    clock_gettime(CLOCK_MONOTONIC, &start);

    cudaError_t error;
    double *d_dm;
    double *d_dc;
    double *d_error_sum_arr;
    point_t *d_data;

    be = rms_error(bm, bc);

    error = cudaMalloc(&d_dm, (sizeof(double) * 8));
    if(error){
        fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_dc, (sizeof(double) * 8));
    if(error){
        fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
    if(error){
        fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_data, sizeof(data));
    if(error){
        fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }

    /* The sample table never changes: copy it to the device once. */
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
        fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
            cudaGetErrorString(error));
    }

    while(!minimum_found) {
        /* Build the eight candidate (m, c) pairs one step away. */
        for(i = 0; i < 8; i++) {
            dm[i] = bm + (om[i] * step);
            dc[i] = bc + (oc[i] * step);
        }
        error = cudaMemcpy(d_dm, dm, (sizeof(double)*8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error,
                cudaGetErrorString(error));
        }
        error = cudaMemcpy(d_dc, dc, (sizeof(double)*8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error,
                cudaGetErrorString(error));
        }

        for(i = 0; i < 8; i++){
            double h_error_sum_arr[1000];
            double error_sum_total = 0;  /* fixed: was read uninitialised */

            /* One thread per sample: 100 blocks x 10 threads = 1000 = n_data. */
            d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
            error = cudaGetLastError();
            if(error){
                fprintf(stderr,"d_rms_error launch returned %d %s\n",error,
                    cudaGetErrorString(error));
            }
            cudaDeviceSynchronize();

            error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000),
                cudaMemcpyDeviceToHost);
            if(error){
                fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
                    cudaGetErrorString(error));
            }

            /* Reduce the per-sample squared residuals into one RMS value. */
            for(int j = 0; j < n_data; j++){
                error_sum_total += h_error_sum_arr[j];
            }
            e[i] = sqrt(error_sum_total / n_data);
            if(e[i] < best_error){
                best_error = e[i];
                best_error_i = i;
            }
        }

        if(best_error < be){
            /* A neighbour improved on the current line: move to it. */
            be = best_error;
            bm = dm[best_error_i];
            bc = dc[best_error_i];
        } else {
            /* No neighbour is better: a (local) minimum has been found. */
            minimum_found = 1;
        }
    }

    error = cudaFree(d_dm);
    if(error){
        fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_dc);
    if(error){
        fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_data);
    if(error){
        fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_error_sum_arr);
    if(error){
        fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
            cudaGetErrorString(error));
        exit(1);
    }

    printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
        (time_elapsed/1.0e9));
    return 0;
}
;
|
502
|
/*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
namespace chainer_trt {
namespace plugin {
/* Scatter-copy kernel: within each batch row (blockIdx.y), moves element
 * idx of d_src to position d_indexes[idx] of d_dst, applying a precomputed
 * permutation (transpose). One thread per element, guarded for the tail. */
__global__ void transpose_kernel(const float* d_src, float* d_dst,
int* d_indexes, int in_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < in_size)
d_dst[blockIdx.y * in_size + d_indexes[idx]] =
d_src[blockIdx.y * in_size + idx];
}
/* Builds the flat destination index for each flat source index idx:
 * decomposes idx into per-axis coordinates using the input strides
 * (i_strides) and dimension sizes (i_d), permutes the axes via shuffle,
 * and recomposes a flat index with the output strides (o_strides). */
__global__ void transpose_indexes(int* d_dst, int* i_strides, int* shuffle,
int* i_d, int* o_strides, int id_size,
int in_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // flat source index
if(idx < in_size) {
int out_idx = 0;
for(int i = 0; i < id_size; i++)
out_idx += (idx / i_strides[shuffle[i]] % i_d[shuffle[i]]) *
o_strides[i];
d_dst[idx] = out_idx;
}
}
/* Host wrapper: launches transpose_kernel with one grid row per batch item
 * (grid.y = batch_size) on the given stream. d_indexes must already hold
 * the permutation built by initialize_transpose_indexes. */
void apply_transpose(const float* d_src, float* d_dst, int* d_indexes,
int in_size, int batch_size, cudaStream_t stream) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * in_size / thread_size);
dim3 grid(block_size, batch_size);
transpose_kernel<<<grid, thread_size, 0, stream>>>(d_src, d_dst,
d_indexes, in_size);
}
/* Host wrapper: fills d_dst with the source->destination index mapping for
 * the permutation described by shuffle/i_d/i_strides/o_strides.
 * Launched on the default stream (no stream parameter). */
void initialize_transpose_indexes(int* d_dst, int* i_strides, int* shuffle,
int* i_d, int* o_strides, int in_size,
int id_size) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * in_size / thread_size);
transpose_indexes<<<block_size, thread_size>>>(
d_dst, i_strides, shuffle, i_d, o_strides, id_size, in_size);
}
}
}
|
503
|
// This example demonstrates parallel floating point vector
// addition with a simple __global__ function.
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 512
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
// Sequential reference reduction: accumulates the n elements of a into c[0].
// Note: c[0] is not zeroed here — the caller owns its initial value.
void vector_reduction_seq(const float *a,
float *c,
const size_t n){
    size_t idx = 0;
    while(idx < n){
        c[0] += a[idx];
        ++idx;
    }
}
// Unrolled reduction of the last 64 shared-memory slots by the first 32
// threads: each step halves the active span (64 -> 1), leaving the block
// total in sD[0]. 'volatile' makes each lane's store visible to the others.
// NOTE(review): this relies on implicit warp-synchronous execution; on
// Volta+ (independent thread scheduling) the pattern needs __syncwarp()
// between steps — confirm the target architecture.
__device__ void warp_reduce(volatile float* sD, int tid) { //unroll last warp (32 threads)
sD[tid] += sD[tid + 32];
sD[tid] += sD[tid + 16];
sD[tid] += sD[tid + 8];
sD[tid] += sD[tid + 4];
sD[tid] += sD[tid + 2];
sD[tid] += sD[tid + 1];
}
// Naive in-place reduction over global memory: each block folds its
// blockDim.x-wide slice of a[] in half every iteration, then thread 0 adds
// the block's partial sum into *c with atomicAdd.
// NOTE(review): destroys the contents of a[]; has no i < n guard, so n must
// be an exact multiple of blockDim.x (true for main's 1M / 512 launch); and
// *c must be zeroed by the caller before the launch.
__global__ void vector_reduction(float *a,
float *c,
const size_t n){
// compute the global element index this thread should process
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) { //binary reduction
if (tid < s) {
a[i] += a[i + s];
}
__syncthreads();
}
if (tid == 0) atomicAdd(c, a[i]);
}
// Shared-memory reduction: each thread loads and sums TWO input elements on
// the first load (a block covers 2*blockDim.x inputs), the shared buffer is
// halved with __syncthreads() between steps down to 64 entries, warp_reduce
// folds the rest, and thread 0 atomically adds the block total into *c.
// Requires blockDim.x * sizeof(float) bytes of dynamic shared memory.
// NOTE(review): no bounds guard — n must be an exact multiple of
// 2*blockDim.x, and *c must be zeroed by the caller before the launch.
__global__ void vector_reduction_shared(const float* a, float* c, const size_t n) {
extern __shared__ float sD[];
unsigned int tid = threadIdx.x;
unsigned int blockSize = blockDim.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
sD[tid] = a[i] + a[i+blockSize]; //add on first load
__syncthreads();
for(unsigned int s=blockSize/2; s > 32; s >>= 1) { //binary reduction
if (tid < s) {
sD[tid] += sD[tid + s];
}
__syncthreads();
}
if (tid < 32) warp_reduce(sD, tid); //unroll last warp for block
if (tid == 0) atomicAdd(c,sD[0]); //add each block value to final value
}
/*
 * Benchmarks three reductions of a 1M-element vector of ones — a sequential
 * CPU loop, a naive global-memory CUDA kernel, and a shared-memory CUDA
 * kernel — then prints times and relative throughput.
 *
 * Fixes over the original:
 *  - host_c[0] was accumulated into while still uninitialised (malloc'd),
 *    so the serial answer started from garbage;
 *  - device_c (the atomicAdd target) was never zeroed before the first GPU
 *    run, so the simple-kernel result started from garbage too;
 *  - the byte count was computed with sizeof(int) for float arrays.
 */
int main(void){
    // create arrays of 1M elements
    const int num_elements = 1<<20;
    // compute the size of the arrays in bytes (fixed: was sizeof(int))
    const int num_bytes = num_elements * sizeof(float);
    // pointers to host & device arrays
    float *device_array_a = 0;
    float *device_c = 0;
    float *host_array_a = 0;
    float *host_c = 0;
    // malloc the host arrays
    host_array_a = (float*)malloc(num_bytes);
    host_c = (float*)malloc(sizeof(float));
    // cudaMalloc the device arrays
    cudaMalloc((void**)&device_array_a, num_bytes);
    cudaMalloc((void**)&device_c, sizeof(float));
    // if any memory allocation failed, report an error message
    if(host_array_a == 0 || host_c == 0 ||
       device_array_a == 0 || device_c == 0){
        printf("couldn't allocate memory\n");
        return 1;
    }
    // fill the input with ones so the expected sum is exactly num_elements
    // (exactly representable in a float, so the == comparisons below hold)
    for(int i = 0; i < num_elements; ++i){
        host_array_a[i] = 1;
    }
    // both reductions accumulate with += / atomicAdd, so the accumulators
    // must start at zero (fixed: they were previously uninitialised)
    host_c[0] = 0;
    cudaMemset(device_c, 0, sizeof(float));
    // copy the input array to the device memory space
    cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
    const size_t num_launches = 1;
    double average_seq_time;
    struct timespec start, end;
    std::cout << "Timing sequential implementation...";
    if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
        perror( "clock gettime" );
        exit( EXIT_FAILURE );
    }
    for(int i = 0; i < num_launches; i++){
        vector_reduction_seq(host_array_a, host_c, num_elements);
    }
    if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) {
        perror( "clock gettime" );
        exit( EXIT_FAILURE );
    }
    float serialAns = host_c[0];
    //compute the time in s
    average_seq_time = ( end.tv_sec - start.tv_sec )
        + (double)( end.tv_nsec - start.tv_nsec ) / 1e+9;
    //take the average
    average_seq_time /= num_launches;
    std::cout << " done." << std::endl;
    std::cout << average_seq_time << "s" << std::endl;
    // launch configuration for the device reductions
    const size_t block_size = BLOCK_SIZE;
    size_t grid_size = num_elements / block_size;
    // deal with a possible partial final block
    if(num_elements % block_size) ++grid_size;
    // time the kernel launches using CUDA events
    cudaEvent_t launch_begin, launch_end;
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);
    float average_time_simple = 0.0;
    std::cout << "Timing simple implementation...";
    for(int i = 0; i < num_launches; ++i){
        // record a CUDA event immediately before and after the kernel launch
        cudaEventRecord(launch_begin,0);
        // launch the kernel (note: it reduces device_array_a in place)
        vector_reduction<<<grid_size, block_size>>>(device_array_a, device_c, num_elements);
        cudaEventRecord(launch_end,0);
        cudaEventSynchronize(launch_end);
        float time = 0.0;
        // measure the time (ms) spent in the kernel
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_time_simple += time;
    }
    // copy the result back to the host memory space
    cudaMemcpy(host_c, device_c, sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << serialAns << " " << host_c[0] << std::endl;
    if (serialAns != host_c[0]) return 0;
    average_time_simple /= num_launches;
    std::cout << " done." << std::endl;
    std::cout << average_time_simple << "ms" << std::endl;
    // restore the input (the simple kernel destroyed it) and zero the
    // device accumulator again for the shared-memory run
    cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
    host_c[0] = 0;
    cudaMemcpy(device_c, host_c, sizeof(float), cudaMemcpyHostToDevice);
    float average_time_shared = 0.0;
    std::cout << "Timing shared implementation...";
    for(int i = 0; i < num_launches; ++i){
        // record a CUDA event immediately before and after the kernel launch
        cudaEventRecord(launch_begin,0);
        // half the threads per block: each thread loads two elements
        vector_reduction_shared<<<grid_size, block_size/2, (block_size/2)*sizeof(float)>>>(device_array_a, device_c, num_elements);
        cudaEventRecord(launch_end,0);
        cudaEventSynchronize(launch_end);
        float time = 0.0;
        // measure the time (ms) spent in the kernel
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_time_shared += time;
    }
    // copy the result back to the host memory space
    cudaMemcpy(host_c, device_c, sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << serialAns << " " << host_c[0] << std::endl;
    if (serialAns != host_c[0]) return 0;
    average_time_shared /= num_launches;
    std::cout << " done." << std::endl;
    std::cout << average_time_shared << "ms" << std::endl;
    // throughput in billions of additions per second
    float num_ops=num_elements;
    float seq_throughput = num_ops / (average_seq_time) / 1000000000.0f;
    float simple_throughput = num_ops / (average_time_simple / 1000.0f) / 1000000000.0f;
    float shared_throughput = num_ops / (average_time_shared / 1000.0f) / 1000000000.0f;
    std::cout << "Throughput of sequential: " << seq_throughput << " GB/s" << std::endl;
    std::cout << "Throughput of simple kernel: " << simple_throughput << " GB/s" << std::endl;
    std::cout << "Simple performance improvement: " << simple_throughput / seq_throughput << "x" << std::endl;
    std::cout << "Throughput of shared kernel: " << shared_throughput << " GB/s" << std::endl;
    std::cout << "Shared performance improvement: " << shared_throughput / seq_throughput << "x" << std::endl;
    std::cout << "Shared performance over simple improvement: " << shared_throughput / simple_throughput << "x" << std::endl;
    cudaEventDestroy(launch_begin);
    cudaEventDestroy(launch_end);
    // deallocate memory
    free(host_array_a);
    free(host_c);
    cudaFree(device_array_a);
    cudaFree(device_c);
}
|
504
|
#include<stdio.h>
#define iteration_max 100
#define CEIL(a, b) (((a) + (b) - 1)/(b))
// Function using of the internet
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Prints the failing file/line together with the CUDA error code and string,
// then aborts the process, whenever err is not cudaSuccess. Used through the
// checkCudaErrors() macro above, which supplies __FILE__ / __LINE__.
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
/* Traz a GLOBAL para cá */
/* Calc_mandelbrot NVIDIA_CUDA */
/* Mandelbrot kernel: each thread reads buffer[idx] (a flat pixel number),
 * decodes it into grid coordinates (i, j), maps those to a point in the
 * complex rectangle [min_real,max_real] x [min_imag,max_imag], iterates
 * z = z^2 + c until |z|^2 > 4 or iteration_max, and overwrites buffer[idx]
 * with the iteration count.
 * NOTE(review): there is no bounds check against the buffer length — the
 * launch in invoke_cuda rounds the block count up, so trailing threads
 * read/write past the allocation whenever tam_vetor is not a multiple of
 * the block size; confirm and add a guard (needs the length as a param). */
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int *buffer){
int index_vector = (blockIdx.x * blockDim.x) + threadIdx.x;
int val_for_cal = buffer[index_vector];
int i = val_for_cal / width;
int j = val_for_cal % width;
/* Step sizes of the complex-plane grid. */
float del_x = (max_real - min_real)/width;
float del_y= (max_imag - min_imag)/height;
int iteration = 0;
float x_point = min_real + del_x * i;
float y_point = max_imag - del_y * j;
float z_y = y_point;
float z_x = x_point;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
/* z_y is updated first from the OLD z_x, then z_x from the cached squares,
 * which preserves the correct z^2 + c ordering. */
for(iteration = 0; iteration < iteration_max && ((z_x2 + z_y2) <= 4); iteration++){
z_y = (2.0 * z_x * z_y)+ y_point;
z_x = z_x2 - z_y2 + x_point;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[index_vector] = iteration;
};
/* Host wrapper: copies the pixel-index vector to the GPU, launches the
 * Mandelbrot kernel, and copies the per-pixel iteration counts back into
 * vetor (in place). 'threads' selects the block size; the grid size is
 * rounded up with CEIL.
 * Fix: a kernel launch returns no status directly, so the original never
 * detected launch-configuration failures — cudaGetLastError() is now
 * checked immediately after the launch. */
extern "C" void invoke_cuda(int tam_vetor, int width, int height, float *min_real, float *min_imag, float *max_real, float *max_imag, int threads, int *vetor){
    int *d_vetor;
    /* Allocate the device buffer and upload the pixel indexes. */
    checkCudaErrors(cudaMalloc(&d_vetor, tam_vetor*sizeof(int)));
    checkCudaErrors(cudaMemcpy(d_vetor, vetor, tam_vetor*sizeof(int), cudaMemcpyHostToDevice));
    /* Configure the launch: round the block count up to cover every pixel. */
    int threads_per_block = threads;
    int num_blocks = CEIL((tam_vetor), threads_per_block);
    kernel<<<num_blocks, threads_per_block>>>(width, height, *min_real, *min_imag, *max_real, *max_imag, d_vetor);
    /* Surface launch-configuration errors (the launch itself is async). */
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    /* Bring the per-pixel iteration counts back to the host. */
    checkCudaErrors(cudaMemcpy(vetor, d_vetor, tam_vetor*sizeof(int), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_vetor));
}
|
505
|
/* Reduces the first k elements of magx_gpu and magy_gpu into element 0 of
 * each array. Intended for a single block: thread threadIdx.x stages element
 * threadIdx.x into shared memory (so blockDim.x must be <= 512 and >= k —
 * NOTE(review): confirm at the launch site, which is not visible here).
 * Fix: the accumulators mx/my were __shared__ and every thread concurrently
 * zeroed and accumulated into them, and every thread wrote the result — a
 * data race producing nondeterministic sums. The summation is now performed
 * by thread 0 only, with thread-local accumulators, after the barrier. */
__global__ void force_aux0(long k, double *magx_gpu, double *magy_gpu)
{
    long i;
    long cind = threadIdx.x;
    __shared__ double mgx[512], mgy[512];
    /* Stage one element per thread into shared memory. */
    mgx[cind] = magx_gpu[cind];
    mgy[cind] = magy_gpu[cind];
    __syncthreads();
    /* Serial reduction by thread 0 only — no concurrent writers. */
    if (cind == 0)
    {
        double mx = 0.0;
        double my = 0.0;
        for (i = 0; i < k; i++)
        {
            mx += mgx[i];
            my += mgy[i];
        }
        magx_gpu[0] = mx;
        magy_gpu[0] = my;
    }
    return;
}
|
506
|
// Kernel function to add the elements of two arrays
// Element-wise vector addition: y[i] = x[i] + y[i] for the first n elements.
// One thread per element; the guard handles a partial tail block.
__global__
void add(int n, float *x, float *y)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    y[idx] += x[idx];
}
|
507
|
#include "includes.h"
/**
* This is my first program in learning parallel programming using CUDA.
* Equivalent to a hello World program :-)
* This program basically performs two tasks:
* 1. It selects suitable CUDA enabled device(GPU) and prints the device properties
* 2. It demonstrate basic parallel addition of two arrays on the device(GPU) using add kernel.
* Author: Shubham Singh
**/
#define N 10 /*N is size of arrays*/
using namespace std;
/************************************************************************************************************
* Function: Kernel to perform addition of two arrays in parallel on device(GPU)
* Input: Takes 3 pointer to int variables pointing to some memory locations on the device(GPU)
* Output: None
************************************************************************************************************/
/* Adds the N-element arrays a and b element-wise into c, one element per
 * BLOCK (indexing uses blockIdx.x only — this matches a launch of N blocks
 * of one thread each; confirm at the call site, which is not visible here). */
__global__ void add(int *a, int *b, int *c)
{
int i = blockIdx.x; /*blockIDx.x holds ID of block and acts as index*/
if (i < N)
c[i] = a[i] + b[i];
}
|
508
|
#include <iostream>
// Element-wise addition c[index] = a[index] + b[index], one thread per element.
// NOTE(review): no bounds guard — every launch must supply exactly one thread
// per element (main below uses N divisible by THREADS_PER_BLOCK, so it holds
// there; re-check if N or the block size changes).
__global__ void add( int*a, int*b, int*c ) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define N (1024*1024*16)
#define THREADS_PER_BLOCK 1024
/* Fills two N-element arrays with random values in [1,100], adds them on the
 * GPU, and prints every N/100-th element of the inputs and the result.
 * NOTE(review): time(NULL) is called below but <time.h>/<ctime> is not among
 * this file's includes — it compiles only via transitive inclusion; confirm.
 * NOTE(review): no CUDA API call or kernel launch is error-checked, so c[]
 * may print garbage if any of them fail. */
int main( void ) {
int *a, *b, *c; // host copies of a,b,c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N *sizeof( int); // we need space for N integers
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
/* initialize random seed: */
srand ( time(NULL) );
// fill both inputs with random values in [1, 100]
for (int i=0; i<N; i ++)
{ a[i] = rand() %100 + 1;
b[i] = rand() %100 + 1;
};
// print a 1% sample of each input
for (int i=0; i<N; i +=N/100)
{
printf("a %i; ",a[i]);
}
printf(" end of a \n\n");
for (int i=0; i<N; i +=N/100)
{
printf("b %i; ",b[i]);
}
printf(" end of b \n\n");
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel with N parallel blocks
add<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c);
// copy device result back to host copy of c (cudaMemcpy blocks until the
// kernel has finished, so no explicit synchronization is needed here)
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost);
// print a 1% sample of the result
for (int i=0; i<N; i +=N/100)
{
printf("c %i; ",c[i]);
}
printf(" end of c \n");
free( a ); free( b ); free( c );
cudaFree( dev_a);
cudaFree( dev_b);
cudaFree( dev_c);
return 0;
}
|
509
|
#include "includes.h"
// Extracts parameter column `ipr` from the flattened sample array:
// for each (walker, step) pair the selected parameter is copied into
// chnFnctn, indexed walker-major within each step.
__global__ void chainFunction ( const int dim, const int nwl, const int nst, const int ipr, const float *smpls, float *chnFnctn ) {
  int w = threadIdx.x + blockDim.x * blockIdx.x;   // walker index
  int st = threadIdx.y + blockDim.y * blockIdx.y;  // step index
  if ( w >= nwl || st >= nst ) {
    return;
  }
  int flat = w + st * nwl;
  chnFnctn[flat] = smpls[ipr + flat * dim];
}
|
510
|
#include <fstream>
#include <iostream>
#include <math.h>
#include <cmath>
#include <curand_kernel.h>
#include <cuda.h>
#include <string>
#include <time.h>
// Rotates the 3-vector `vec` in place about the axis of `w` by the
// angle |w|*dt (Rodrigues' rotation formula): vec is decomposed into a
// component parallel to the axis (unchanged), a perpendicular component
// (scaled by cos) and the cross-product direction (scaled by sin).
//
// FIX: the original divided by |w| unconditionally, so a zero angular
// velocity produced NaNs that poisoned the vector; a zero rotation now
// leaves vec untouched.  Math calls use the explicit single-precision
// forms (sqrtf/cosf/sinf) to make the float path unambiguous.
__device__ void rot( float *w, float *vec, const float dt)
{
	float mw = sqrtf(w[0]*w[0] + w[1]*w[1] + w[2]*w[2]);
	if (mw == 0.0f) return;   // zero rotation rate: identity rotation (avoids 1/0)
	float invmw = 1.0f/mw;
	// Unit rotation axis.
	float omega[3];
	omega[0] = w[0]*invmw;
	omega[1] = w[1]*invmw;
	omega[2] = w[2]*invmw;
	// Component of vec along the axis (invariant under the rotation).
	float dot = omega[0]*vec[0] + omega[1]*vec[1] + omega[2]*vec[2];
	float i1[3];
	i1[0] = omega[0]*dot;
	i1[1] = omega[1]*dot;
	i1[2] = omega[2]*dot;
	// Component of vec perpendicular to the axis.
	float i2[3];
	i2[0] = vec[0] - i1[0];
	i2[1] = vec[1] - i1[1];
	i2[2] = vec[2] - i1[2];
	// omega x vec: direction the perpendicular part rotates into.
	float i3[3];
	i3[0] = omega[1]*vec[2] - omega[2]*vec[1];
	i3[1] = omega[2]*vec[0] - omega[0]*vec[2];
	i3[2] = omega[0]*vec[1] - omega[1]*vec[0];
	float cwt = cosf(mw*dt);
	float swt = sinf(mw*dt);
	vec[0] = i1[0] + i2[0]*cwt + i3[0]*swt;
	vec[1] = i1[1] + i2[1]*cwt + i3[1]*swt;
	vec[2] = i1[2] + i2[2]*cwt + i3[2]*swt;
}
//-----------------------------------------------------------------------------
// Precesses every nuclear spin about the field produced by its electron
// spin for one time step dt, then tree-reduces the hyperfine-weighted
// nuclear spins into per-block partial sums (the Overhauser-field
// contribution consumed by the electron-spin step).
//
// i:    nuclear spins, 3 floats each, one row of 3*ni per y-index (replica)
// s:    electron spins, 3 floats per replica
// hyp:  per-nucleus hyperfine couplings
// wout: one 3-vector per block of the partial reduction (width `size`)
// n:    log2(blockDim.x); dt: time step
// Dynamic shared memory: 3*blockDim.x*blockDim.y floats (see launches below).
__global__ void precessnucspins (float *i, float *s, const int ni, float* hyp, float* wout, const int n, const int size, const float dt)
{
extern __shared__ float iloc[];
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int groupid = blockIdx.x;
int nl = blockDim.x;
int nl1 = blockDim.y;
int ggid = (groupid * nl) + glid;
int ggid1 = (blockIdx.y * nl1) + glid1;
float w[3];
float hyperfine = 0;
// Per-row shared-memory offset (locsize) and per-replica global offset.
int locsize = 3*nl*glid1;
int locid = 3*glid + locsize;
int globsize = 3*ni*ggid1;
// Stage this block's spins into shared memory with coalesced, nl-strided
// loads; out-of-range slots are zero-filled so the reduction stays valid.
int sind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3; ++ii, sind += nl)
{
if (sind < 3*ni)
{
iloc[glid + ii*nl + locsize] = i[sind + globsize];
} else {
iloc[glid + ii*nl + locsize] = 0;
}
}
__syncthreads();
if (ggid < ni)
{
// Idea! ADD CHECK IF GLID = 0 TO PREVENT MEMORY BANK CONFLICTS AND SAVE TO LOC MEM
// OR write to code to prevent strided mem access
// Precession frequency w = A_k * S from this replica's electron spin.
hyperfine = hyp[ggid];
w[0] = hyperfine*s[3*ggid1];
w[1] = hyperfine*s[1 + 3*ggid1];
w[2] = hyperfine*s[2 + 3*ggid1];
rot (w, iloc+(locid), dt);
}
__syncthreads();
// Write the precessed spins back to global memory.
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3 && wind < 3*ni; ++ii, wind += nl)
{
i[wind + globsize] = iloc[glid + locsize + ii*nl];
}
__syncthreads();
// Weight each spin by its hyperfine constant, then tree-reduce the
// block's spins pairwise (blockDim.x assumed a power of two, n = log2).
iloc[locid] = hyperfine*iloc[locid];
iloc[locid + 1] = hyperfine*iloc[locid + 1];
iloc[locid + 2] = hyperfine*iloc[locid + 2];
#pragma unroll
for (int k=1; k < n; k++)
{
__syncthreads();
int b = nl >> k;
if (glid < b)
{
iloc[locid] += iloc[3*(glid + b)+ locsize];
iloc[locid + 1] += iloc[3*(glid + b) + 1+ locsize];
iloc[locid + 2] += iloc[3*(glid + b) + 2+ locsize];
}
}
__syncthreads();
// Thread 0 combines the last pair and emits this block's partial sum.
if (glid == 0)
{
wout[(ggid >> n)*3 + 3*size*ggid1] = iloc[locsize] + iloc[3 + locsize];
wout[(ggid >> n)*3 + 1 + 3*size*ggid1] = iloc[1 + locsize] + iloc[4 + locsize];
wout[(ggid >> n)*3 + 2 + 3*size*ggid1] = iloc[2 + locsize] + iloc[5 + locsize];
}
}
//-----------------------------------------------------------------------------
// Initialises one curand state per thread.  Every thread shares the
// same seed but uses its global thread id as the sequence number, with
// a per-thread offset of 4*mcs so later Monte Carlo passes do not
// reuse earlier draws.
__global__ void setup_rand(curandState *state, unsigned long seed, const int mcs)
{
unsigned tid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, tid, 4*mcs*tid, &state[tid]);
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Draws a random initial electron spin of length `len` for each thread
// (uniform azimuth, polar angle from a cube-root-transformed draw),
// stages it in shared memory, records the z component in sinit, then
// writes all spins back to s in a coalesced nl-strided layout.
// Dynamic shared memory: 3*blockDim.x floats.
__global__ void vecbuilds(float *s, float *sinit, curandState *state, const float len)
{
extern __shared__ float sloc[];
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int lsz = blockDim.x;
int lid = threadIdx.x;
int grp = blockIdx.x;
// Two uniform draws (in this order, to keep the RNG stream identical).
float u1 = curand_uniform(&state[tid]);
float u2 = curand_uniform(&state[tid]);
float phi = 2.0*M_PI*u1;
float th = acos(cbrtf(2.0*u2 - 1.0));
// Spherical -> Cartesian, staged in shared memory.
sloc[3*lid] = len*sin(th)*cos(phi);
sloc[3*lid + 1] = len*sin(th)*sin(phi);
sloc[3*lid + 2] = len*cos(th);
__syncthreads();
// Remember the initial z component for later normalisation.
sinit[tid] = sloc[3*lid + 2];
// Coalesced write-back: three nl-strided passes.
int wind = 3*lsz*grp + lid;
for(int ii = 0; ii < 3; ++ii, wind += lsz)
{
s[wind] = sloc[lid + ii*lsz];
}
}
//-----------------------------------------------------------------------------
// Draws random nuclear spin vectors.  The first `nindium` sites get
// magnitude sqrt(99/4), the remainder sqrt(15/4); directions are
// uniform on the sphere.  Spins are staged in shared memory and then
// written to `i` in a coalesced layout, one 3*ni row per y-index.
// Dynamic shared memory: 3*blockDim.x*blockDim.y floats.
__global__ void vecbuildi(float *i, curandState *state, const int ni, const int nindium)
{
extern __shared__ float iloc[];
int tidx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tidy = (blockIdx.y * blockDim.y) + threadIdx.y;
int lsz = blockDim.x;
int lidx = threadIdx.x;
int lidy = threadIdx.y;
int grp = blockIdx.x;
int gsz = lsz*gridDim.x;
// Magnitude depends on the nuclear species at this site.
float m = (tidx < nindium) ? sqrt(99.0f/4.0f) : sqrt(15.0f/4.0f);
// Uniform direction (draw order preserved for RNG-stream parity).
float u1 = curand_uniform(&state[tidx + tidy*gsz]);
float phi = 2.0f*M_PI*u1;
float u2 = curand_uniform(&state[tidx + tidy*gsz]);
float th = acos(2.0f*u2 - 1.0f);
iloc[3*lidx + 3*lsz*lidy] = m*sin(th)*cos(phi);
iloc[3*lidx + 3*lsz*lidy + 1] = m*sin(th)*sin(phi);
iloc[3*lidx + 3*lsz*lidy + 2] = m*cos(th);
__syncthreads();
// Coalesced write-back; guard covers the padded tail past 3*ni.
int wind = 3*lsz*grp + lidx;
for(int ii = 0; ii < 3 && wind < 3*ni; ++ii, wind += lsz)
{
i[wind + 3*ni*tidy] = iloc[lidx + 3*lsz*lidy + ii*lsz];
}
}
//-----------------------------------------------------------------------------
// One pass of a multi-pass reduction of 3-vectors: each block of nl
// threads tree-sums its nl vectors pairwise (n = log2(nl)) and writes a
// single partial sum to wout; slots beyond the ceil(a/nl) live results
// are zeroed so the next pass can consume wout without stale data.
// a = number of live inputs this pass; size = padded output width.
// Dynamic shared memory: 3*blockDim.x*blockDim.y floats.
__global__ void reduce(float *w, const int n, const int a, float *wout, const int size)
{
extern __shared__ float wtemp[];
int nl = blockDim.x;
int groupid = blockIdx.x;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int ggid = (groupid * nl) + glid;
int ggid1 = (blockIdx.y * blockDim.y) + glid1;
int ng = nl*gridDim.x;
int locsize = 3*nl*glid1;
int globsize = 3*ng*ggid1;
int id = 3*glid + locsize;
int wind = 3*nl*groupid + glid;
// Stage 3*nl floats per row into shared memory (coalesced loads).
wtemp[glid + locsize] = w[wind + globsize];
wtemp[glid + nl + locsize] = w[wind + nl + globsize];
wtemp[glid + 2*nl + locsize] = w[wind + 2*nl + globsize];
#pragma unroll
for (int k=1; k < n; k++)
{
__syncthreads();
int b = nl >> k;
if (glid < b)
{
wtemp[id] += wtemp[3*(glid + b)+ locsize];
wtemp[id + 1] += wtemp[3*(glid + b) + 1+ locsize];
wtemp[id + 2] += wtemp[3*(glid + b) + 2+ locsize];
}
}
__syncthreads();
// Thread 0 adds the final pair and emits this block's partial sum.
if (glid == 0)
{
wout[(ggid >> n)*3 + 3*size*ggid1] = wtemp[locsize] + wtemp[3 + locsize];
wout[(ggid >> n)*3 + 1 + 3*size*ggid1] = wtemp[1 + locsize] + wtemp[4 + locsize];
wout[(ggid >> n)*3 + 2 + 3*size*ggid1] = wtemp[2 + locsize] + wtemp[5 + locsize];
}
// c = ceil(a / nl): number of live partial sums after this pass.
int c = (a + nl - 1)/nl;
__syncthreads();
// Zero the padding tail of wout so stale values cannot leak into the
// next reduction pass.
for(int ii = 0; ii < 3 && wind < 3*size; ++ii, wind += nl)
{
if (wind >= 3*c)
{
wout[wind + 3*size*ggid1] = 0;
}
}
}
//-----------------------------------------------------------------------------
// Advances each replica's electron spin by one step dt: the spin
// precesses about the total field = reduced Overhauser field (read from
// w with stride 3*a between replicas) plus the external field wi.
// Before rotating, the current z component is recorded into
// sstore[x + size*ggid] for the correlation-function calculation.
// Dynamic shared memory: 3*blockDim.x floats.
__global__ void precesselecspins(float *w, float *wi, float *s, const int size, const int x,
float *sstore, const int a, const float dt)
{
extern __shared__ float sloc[];
float wtemp[3];
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int glid = threadIdx.x;
int nl = blockDim.x;
int groupid = blockIdx.x;
// Stage this block's electron spins into shared memory.
int sind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3; ++ii, sind += nl)
{
sloc[glid + ii*nl] = s[sind];
}
__syncthreads();
wtemp[0] = w[3*a*ggid];
wtemp[1] = w[3*a*ggid + 1];
wtemp[2] = w[3*a*ggid + 2];
// Record S_z before this step's rotation.
sstore[x + size*ggid] = sloc[3*glid + 2];
// Total field = Overhauser field + external field.
wtemp[0] = wtemp[0] + wi[0];
wtemp[1] = wtemp[1] + wi[1];
wtemp[2] = wtemp[2] + wi[2];
rot (wtemp, sloc+(3*glid), dt);
__syncthreads();
// Write the rotated spins back out (coalesced, nl-strided).
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3; ++ii, wind += nl)
{
s[wind] = sloc[glid + ii*nl];
}
}
//-----------------------------------------------------------------------------
// Normalises each stored electron-spin z component by its replica's
// initial value and scales by 1/2, writing the result to `output`
// (padded width ng = blockDim.x*gridDim.x per replica row).
//
// BUG FIX: for padding columns (ggid >= size) the original skipped the
// shared-memory store but still wrote loc[...] to output, publishing
// uninitialized shared memory.  Padding slots are now explicitly zero,
// so the downstream reduction sees deterministic values.
__global__ void prep2(float *sstore, float *output, const int size, float *sinit)
{
extern __shared__ float loc[];
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int nl = blockDim.x;
int ng = nl*gridDim.x;
if (ggid < size)
{
loc[glid + nl*glid1] = sstore[ggid + size*ggid1]/sinit[ggid1];
}
else
{
loc[glid + nl*glid1] = 0.0f;
}
output[ggid + ng*ggid1] = (1.0/2.0)*loc[glid + nl*glid1];
}
//-----------------------------------------------------------------------------
// One pass of a reduction along y (over the Monte Carlo replicas):
// each block tree-sums its blockDim.y rows pairwise (n = log2(nl1))
// and writes one partial-sum row to `out`; rows past the live region
// are zeroed for the next pass.  a = live rows this pass.
// Dynamic shared memory: blockDim.x*blockDim.y floats.
__global__ void reduce2(const int n, const int a, float *output, float *out)
{
extern __shared__ float sstoretemp[];
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int nl1 = blockDim.y;
int nl = blockDim.x;
int ng = nl*gridDim.x;
sstoretemp[glid + nl*glid1] = output[ggid + ng*ggid1];
#pragma unroll
for (int k=1; k < n; k++)
{
__syncthreads();
int b = nl1 >> k;
if (glid1 < b)
{
sstoretemp[glid + nl*glid1] += sstoretemp[glid+ nl*(glid1+b)];
}
}
__syncthreads();
// Row 0 adds the final pair and writes this block's partial-sum row.
if (glid1 == 0)
{
out[ggid + ng*(ggid1 >> n)] = sstoretemp[glid] + sstoretemp[glid + nl];
}
// c = ceil(a / nl1): number of live rows after this pass.
int c = 0;
if (a%nl1 == 0)
{
c = a/nl1;
}
else
{
c = a/nl1 + 1;
}
// Zero stale rows past the live region.
// NOTE(review): the guard uses `>` so row c itself is left untouched —
// confirm whether `>=` was intended.
if (ggid1 > c)
{
out[ggid + ng*ggid1] = 0;
}
}
//-----------------------------------------------------------------------------
// Accumulates the y-reduced correlation data into row j of Rzz,
// weighted by recipmcs (1 / number of parallel replicas).
__global__ void tensors(float *output, float *Rzz, const int size, const int j, const float recipmcs)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid >= size) return;
Rzz[tid + j*size] += output[tid]*recipmcs;
}
//-----------------------------------------------------------------------------
// Scales every element of Rzz in place by recipmcs
// (1 / number of Monte Carlo iterations).
__global__ void final(float *Rzz, const float recipmcs, const int xmax)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid >= xmax) return;
Rzz[tid] *= recipmcs;
}
//-----------------------------------------------------------------------------
// Non-destructive variant of `final`: writes the scaled Rzz into
// Rzztemp, leaving the accumulator Rzz unchanged (used for
// intermediate snapshots).
__global__ void final_temp(float *Rzz, const float recipmcs, const int xmax, float *Rzztemp)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid >= xmax) return;
Rzztemp[tid] = Rzz[tid]*recipmcs;
}
//-----------------------------------------------------------------------------
// Driver: Monte Carlo simulation of a central-spin system (one electron
// spin coupled to ni nuclear spins per replica).  Builds random spin
// configurations, alternates electron/nuclear precession steps with
// multi-pass reductions of the Overhauser field, averages the S_z
// autocorrelation over replicas, and writes Rzz(t) to a text file.
int main(void)
{
// Code only works if ni <= local_size1**2 - haven't had time to figure out why
int nDevices;
clock_t t;
t = clock();
// Enumerate devices and print basic memory properties.
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
// Problem size: ni nuclear spins, the first nindium of them indium.
int ni = 100;
int nindium = 50;
float len = sqrt(5.0/12.0);
// Work-group geometry; global_blocks2 = number of parallel replicas / local_size2.
int local_size1 = 32;
int local_size2 = 32;
int global_blocks1 = (ni + local_size1 - 1)/local_size1;
int global_blocks2 = 32;
int global_size1 = global_blocks1*local_size1;
int global_size2 = global_blocks2*local_size2;
// Set up timestep
float dt = 65.162413696574831;
// Set up maxtime
//float tmax = 30000000.0;
// xmax - total number of timesteps
int xmax = 1000;
// xmax must be a multiple of iterations
int iterations = xmax/1000;
int size = xmax/iterations;
// Global size for final kernels
int global_blocks_tensors = (size + local_size1 - 1)/local_size1;
int global_sizetensors = global_blocks_tensors * local_size1;
dim3 gridSizetensors = dim3 (global_blocks_tensors, global_blocks2);
// Global size for odd reductions
int global_blocks_odd = (global_blocks1 + local_size1 - 1)/local_size1;
int global_size_odd = global_blocks_odd*local_size1;
dim3 gridSizeodd = dim3 (global_blocks_odd, global_blocks2);
// Global size for 3rd reduction
int global_blocks_red = (global_blocks_odd + local_size1 - 1)/local_size1;;
int global_size_red = global_blocks_red*local_size1;
dim3 gridSizered = dim3 (global_blocks_red, global_blocks2);
// Global size for final step
int global_blocks_final = (xmax + local_size1 - 1)/local_size1;
// Set up monte carlo iterations
int mcs = 1;
// Set up 2D workgroups
dim3 blockSize(local_size1, local_size2);
dim3 gridSize = dim3 (global_blocks1, global_blocks2);
// Set up electron spin and initial electron spin arrays
float *s, *sinit;
cudaMallocManaged(&s, 3*global_size2*sizeof(float));
cudaMallocManaged(&sinit, global_size2*sizeof(float));
// Set up external field
float *wi;
cudaMallocManaged(&wi, 3*sizeof(float));
wi[0] = 0.0;
wi[1] = 0.0;
wi[2] = 0.0;
// Set up nuclear spin vector arrays
float *i;
cudaMallocManaged(&i, 3*global_size2*ni*sizeof(float));
// Set up state for random number generation
curandState *state;
cudaMallocManaged((void**)&state, global_size1*global_size2*sizeof(curandState));
// Set up the hyperfine constants
float *hyperfine;
cudaMallocManaged(&hyperfine, ni*sizeof(float));
// Read one coupling per line from hyp.txt.
// NOTE(review): no check that the file opened or supplies >= ni lines —
// extra lines would overrun the ni-element buffer; confirm input format.
std::ifstream hyp;
hyp.open("hyp.txt");
int p = 0;
for(std::string line; std::getline(hyp, line); )
{
hyperfine[p]=std::atof(line.c_str());
p += 1;
}
/*
hyperfine[0] =-0.999985;
hyperfine[1] =-0.7369246;
hyperfine[2] =0.511210;
hyperfine[3] =-0.0826998;
hyperfine[4] =0.0655341;
hyperfine[5] =-0.562082;
hyperfine[6] =-0.905911;
hyperfine[7] =0.357729;
hyperfine[8] =0.358593;
hyperfine[9] =0.869386;
hyperfine[10] =-0.232996;
hyperfine[11] =0.0388327;
hyperfine[12] =0.661931;
hyperfine[13] =-0.930856;
hyperfine[14] =-0.893077;
hyperfine[15] =0.0594001;
*/
// Set up omega vector
float *w;
cudaMallocManaged(&w, 3*global_size1*global_size2*sizeof(float));
// Set up output of omega vector
float *wout;
cudaMallocManaged(&wout, 3*global_size_odd*global_size2*sizeof(float));
// Set up tensor vectors
float *Rzz;
cudaMallocManaged(&Rzz, xmax*sizeof(float));
float *Rzztemp;
cudaMallocManaged(&Rzztemp, xmax*sizeof(float));
// Set up electron spin storage vector
float *sstore;
cudaMallocManaged(&sstore, size*global_size2*sizeof(float));
// Set up output
float *output;
cudaMallocManaged(&output, global_sizetensors*global_size2*sizeof(float));
float *out;
cudaMallocManaged(&out, global_sizetensors*global_size2*sizeof(float));
// Work out logs
int n1 = log2f(local_size1);
int n2 = log2f(local_size2);
// Set up seed for random number generation
unsigned long seed = 1;
int pmax = 0;
float time = 0;
// Kernel Calls
// Call random number generation setup kernel
setup_rand<<<global_blocks1*global_blocks2, local_size1*local_size2>>>(state,seed,mcs);
for (int u = 0; u < mcs; ++u)
{
// Build electron spin vectors array
vecbuilds<<<global_blocks2, local_size2, 3*local_size2*sizeof(float)>>>(s, sinit, state, len);
// Build nuclear spin vector array
vecbuildi<<<gridSize, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(i, state, ni, nindium);
// Precess the nuclear spins by dt/2 initially
precessnucspins<<<gridSize, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(i, s, ni, hyperfine, wout, n1, global_size_odd, dt/2.0);
for (int j = 0; j < iterations; ++j)
{
for (int x = 0; x < size; ++x)
{
// Multi-pass reduction of the per-nucleus fields; w and wout
// ping-pong as source/destination until one 3-vector remains.
int p = 0;
int a = global_blocks1;
while (a>1)
{
if (p%2 != 0)
{
reduce<<<gridSizered, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(w, n1, a, wout, global_size_odd);
} else{
reduce<<<gridSizeodd, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(wout, n1, a, w, global_size_red);
}
a = (a + local_size1 - 1)/local_size1;
p = p + 1;
}
pmax = p;
// Parity of the pass count decides which buffer holds the result.
if (pmax%2 != 0)
{
precesselecspins<<<global_blocks2,local_size2,3*local_size2*sizeof(float)>>>(w, wi, s, size, x, sstore, global_size_red, dt);
} else {
precesselecspins<<<global_blocks2,local_size2,3*local_size2*sizeof(float)>>>(wout, wi, s, size, x, sstore, global_size_odd, dt);
}
precessnucspins<<<gridSize, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(i, s, ni, hyperfine, wout, n1, global_size_odd, dt);
}
// Prepare sstore for Rxx, Rxy, Rzz calculation
prep2<<<gridSizetensors, blockSize, local_size1*local_size2*sizeof(float)>>>(sstore, output, size, sinit);
// Reset b between each monte carlo step
int b = global_size2;
int g = 0;
// Reduction in the y direction (over different monte carlo steps running in parallel)
// note that global size in the x direction is now related to xmax (no longer ni)
while (b>1)
{
if (g%2 == 0)
{
reduce2<<<gridSizetensors, blockSize, local_size1*local_size2*sizeof(float)>>>(n2, b, output, out);
} else {
reduce2<<<gridSizetensors, blockSize, local_size1*local_size2*sizeof(float)>>>(n2, b, out, output);
}
b = (b + local_size2 - 1)/local_size2;
g = g + 1;
}
// Sum Rxx, Rxy, Rzz over different monte carlo step iterations - note that this is
// now a 1D workgroup size
if (g%2 ==0)
{
tensors<<<global_blocks_tensors, local_size1>>>(output, Rzz, size, j, 1.0/global_size2);
} else {
tensors<<<global_blocks_tensors, local_size1>>>(out, Rzz, size, j, 1.0/global_size2);
}
}
/*
int o = 0;
if (u != (mcs-1))
{
final_temp<<<global_blocks_final, local_size1>>>(Rzz, (u+1)*global_size2, xmax, Rzztemp);
cudaDeviceSynchronize();
std::ofstream Rzztemp2txt;
Rzztemp2txt.open("Rzz_w=0_10071_temp1.txt");
time = 0;
for (int j = 0; j<xmax; ++j)
{
Rzztemp2txt << time << " " << Rzztemp[j] << "\n";
time += dt;
}
Rzztemp2txt.close();
}
*/
}
// Average over Monte Carlo iterations and wait for the GPU to finish
// before reading the managed buffers on the host.
final<<<global_blocks_final, local_size1>>>(Rzz, 1.0/mcs, xmax);
cudaDeviceSynchronize();
t = clock() - t;
//auto end = std::chrono::high_resolution_clock::now();
//std::cout << "Time: " << std::chrono::duration_cast<std::chrono::microseconds>(end-start).count() << "us" << std::endl;
std::cout << "Time: " << t << std::endl;
// Write the averaged correlation function, one "index value" pair per line.
std::ofstream Rzztxt;
Rzztxt.open("root512_100_spins_1.txt");
for (int j = 0; j<xmax; ++j)
{
Rzztxt << j << " " << Rzz[j] << "\n";
//std::cout << time << " " << Rzz[j] << std::endl;
time += dt;
}
Rzztxt.close();
/*
time = 0;
float sum = 0;
for (int j = 0; j<xmax; ++j)
{
if (j <= 1000)
{
Rzztxt << j << " " << Rzz[j] << "\n";
}
else if (j <= 10000)
{
sum = sum + Rzz[j];
if (j%10 == 0)
{
Rzztxt << j - 5 << " " << sum/10.0 << "\n";
sum = 0;
}
}
else if (j <= 100000)
{
sum = sum + Rzz[j];
if (j%100 == 0)
{
Rzztxt << j - 50 << " " << sum/100.0 << "\n";
sum = 0;
}
}
else if (j <= 1000000)
{
if (j%1000 == 0)
{
Rzztxt << j << " " << Rzz[j] << "\n";
}
}
else if (j <= 10000000)
{
if (j%10000 == 0)
{
Rzztxt << j << " " << Rzz[j] << "\n";
}
}
else if (j <= 100000000)
{
if (j%100000 == 0)
{
Rzztxt << j << " " << Rzz[j] << "\n";
}
}
}
*/
// NOTE(review): Rzztxt.close() is called a second time here (already
// closed above) — harmless, but redundant.
Rzztxt.close();
cudaFree(s);
cudaFree(sinit);
cudaFree(state);
cudaFree(i);
cudaFree(hyperfine);
cudaFree(w);
cudaFree(Rzz);
cudaFree(sstore);
cudaFree(output);
cudaFree(wi);
cudaFree(out);
cudaFree(wout);
cudaFree(Rzztemp);
return 0;
}
|
511
|
// Second CUDA program
// Ping-Che Chen
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 16
// Tiled matrix multiply c = a * b for n x n row-major matrices with
// leading dimensions lda/ldb/ldc.  Each BLOCK_SIZE x BLOCK_SIZE thread
// block computes one output tile, staging tiles of a and b in shared
// memory.  The inner loop accumulates with Kahan-style compensated
// summation (`comp` carries the negated rounding error) to reduce
// single-precision error.  Assumes n is a multiple of BLOCK_SIZE and
// the launch grid exactly covers c.
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
__shared__ float matA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float matB[BLOCK_SIZE][BLOCK_SIZE];
const int tidc = threadIdx.x;
const int tidr = threadIdx.y;
const int bidc = blockIdx.x * BLOCK_SIZE;
const int bidr = blockIdx.y * BLOCK_SIZE;
int i, j;
float results = 0;
float comp = 0;
for(j = 0; j < n; j += BLOCK_SIZE) {
// Load one tile of a and one tile of b, then wait for the whole block.
matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
__syncthreads();
for(i = 0; i < BLOCK_SIZE; i++) {
float t;
// Compensated accumulation: comp holds -(error so far).
comp -= matA[tidr][i] * matB[i][tidc];
t = results - comp;
comp = (t - results) + comp;
results = t;
}
// All threads must be done with this tile before it is overwritten.
__syncthreads();
}
c[(tidr + bidr) * ldc + tidc + bidc] = results;
}
|
512
|
#include <stdio.h>
#include <cuda_runtime.h>
// Enumerates CUDA devices, selects the one with the most streaming
// multiprocessors, then prints its driver/runtime versions and key
// hardware properties.
// FIXES: corrected user-facing typos ("Result = FALL" -> "FAIL",
// "Threr are" -> "There are") and replaced pow(1024.0,3) — which
// required <math.h> that this file never includes — with an explicit
// constant product.
int main(int argc, char **argv) {
    printf("%s Starting...\n", argv[0]);
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess){
        printf("cudaGetDeviceCount returned %d\n -> %s\n",
            (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    if (deviceCount == 0){
        printf("There are no available device(s) that support CUDA\n");
    } else {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }
    // Pick the device with the largest multiprocessor count.
    int maxDevice = 0;
    if (deviceCount > 1){
        int maxMultiprocessors = 0;
        for (int device = 0; device < deviceCount; ++device) {
            struct cudaDeviceProp props;
            cudaGetDeviceProperties(&props, device);
            if (maxMultiprocessors < props.multiProcessorCount){
                maxMultiprocessors = props.multiProcessorCount;
                maxDevice = device;
            }
        }
    }
    int driverVersion = 0, runtimeVersion = 0;
    cudaSetDevice(maxDevice);
    struct cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, maxDevice);
    printf("Device %d: '%s' \n", maxDevice, deviceProp.name);
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
        driverVersion/1000, (driverVersion%100)/10,
        runtimeVersion/1000, (runtimeVersion%100)/10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
        deviceProp.major, deviceProp.minor);
    // 1024^3 spelled out so <math.h>'s pow() is not needed.
    printf(" Total amount of global memory: %.2f MBytes (%llu bytes)\n",
        (float)deviceProp.totalGlobalMem/(1024.0*1024.0*1024.0),
        (unsigned long long) deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n",
        deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %0.f MHz\n",
        deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
        deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize){
        printf(" L2 Cache Size: %d bytes\n",
            deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size(x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
        deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
        deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
        deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
        deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
        deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of constant memory: %lu bytes\n",
        deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
        deviceProp.sharedMemPerBlock);
    printf(" Total amount of registers available per block: %d\n",
        deviceProp.regsPerBlock);
    printf(" Wrap size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
        deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
        deviceProp.maxThreadsPerBlock);
    printf(" Maximum size of each dimension of a block: %d x %d x %d\n",
        deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf(" Maximum size of each dimension of a grid: %d x %d x %d\n",
        deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
    exit(EXIT_SUCCESS);
    return 0;
}
|
513
|
// NOTE(review): this kernel (and the others in this file) deliberately
// reads and writes past array bounds — they appear to be fixtures for
// an out-of-bounds / static-analysis checker.  Do not "fix" the
// indexing: a[x] with x == 5 and a[i] with i == 6 overflow int a[5].
__global__ void test_while()
{
int a[5];
int x = 0;
int i = 0;
while (i++ < 5)
{
// i == 1..5
// x == 1..5
++x;
a[x] = 42;
a[x - 1] = 42;
}
// i == 6, x == 5
a[i] = 42;
a[x] = 42;
a[x - 1] = 42;
}
// Loops `size` times so x == size on exit; b[x] then writes one element
// past the end of b[size].  NOTE(review): apparently an intentional
// out-of-bounds analyzer fixture — do not "fix".
__global__ void test_large_while()
{
const int size = 10000;
int b[size];
int i = 0;
int x = 0;
while (i++ < size)
{
++x;
}
b[x] = 42;
b[x - 1] = 42;
}
// Nested-loop case: a[i] runs up to a[20] on an int[5], and x ends at
// out_bound*inner_bound == 100, so c[x] writes one past c[100].
// NOTE(review): intentional out-of-bounds fixture — do not "fix".
__global__ void test_statements_in_while_body()
{
int a[5];
int c[100];
int out_bound = 20;
int inner_bound = 5;
int i = 0;
int x = 0;
while (i++ < out_bound)
{
if (i < 5)
{
a[i] = 42;
}
a[i] = 42;
int j = 0;
while (j++ < inner_bound)
{
++x;
}
}
c[x] = 42;
c[x - 1] = 42;
}
// For-loop case: a[x] (x up to 5), a[j] (j up to 9) and a[i + 1]
// (up to 5) all exceed int a[5].  NOTE(review): intentional
// out-of-bounds fixture — do not "fix".
__global__ void test_for()
{
int a[5];
int x = 0;
for (int i = 0; i < 5; ++i)
{
++x;
a[x] = 42;
a[x - 1] = 42;
for (int j = 0; j < 10; ++j)
{
a[j] = 42;
a[j / 2] = 42;
}
a[i] = 42;
a[i + 1] = 42;
}
a[x] = 42;
a[x - 1] = 42;
}
|
514
|
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
extern "C"
// Block-sum reduction (NVIDIA 'reduction' sample): each block sums a
// grid-strided slice of g_idata and writes its partial sum to
// g_odata[blockIdx.x].  Dynamic shared memory: blockDim.x floats.
// blockDim.x must be a power of two (the unrolled stages below assume
// it).  NOTE(review): the unsynchronised tid < 32 tail relies on
// pre-Volta implicit warp synchrony; on Volta+ independent thread
// scheduling this would need __syncwarp()/ *_sync intrinsics — confirm
// the target architecture.
__global__ void reduce(float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x;
unsigned int gridSize = blockDim.x*2*gridDim.x;
float mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
|
515
|
#include <cuda.h>
#include <stdio.h>
// Enumerates every CUDA device, prints its key properties, then makes
// device 0 the active device.
int main (int argc, char **argv){
	int deviceTotal;
	cudaGetDeviceCount(&deviceTotal);
	printf("Number of GPUs = %4d\n",deviceTotal);
	for(int dev = 0; dev < deviceTotal; dev++){
		cudaDeviceProp props;
		cudaGetDeviceProperties(&props, dev);
		int threadsPerBlock = props.maxThreadsPerBlock;
		printf("GPU device %4d:\n\tName: %s:\n",dev,props.name);
		printf("\tCompute capabilities: SM %d.%d\n",
			props.major, props.minor);
		printf("\tMaximum number of threads per block: %4d\n",threadsPerBlock);
		printf("\tMaximum number of threads per SM: %4d\n",
			props.maxThreadsPerMultiProcessor);
		printf("\tNumber of streaming multiprocessors: %4d\n",
			props.multiProcessorCount);
		printf("\tClock rate: %d KHz\n",props.clockRate);
		printf("\tGlobal memory: %lu bytes\n",props.totalGlobalMem);
	}
	cudaSetDevice(0);
}
|
516
|
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Sparse (CSR) matrix-vector product: C = alpha * A * B, one thread
// per row.  A_data / A_indices hold the nonzeros and their column
// indices, A_pointers the per-row start offsets, *count the total
// number of nonzeros.
extern "C" { __global__ void smvp(double *A_data, int *A_indices, int *A_pointers, double *B, double *C, int *m, int *n, int *count, double *alpha){
    int ROW = blockIdx.x*blockDim.x+threadIdx.x;
    if(ROW<*m){
        int start = A_pointers[ROW];
        // The last row ends at the total nonzero count; every other row
        // ends where the next row begins.
        // BUG FIX: the original tested `start == *m-1`, comparing a data
        // offset against a row index — that mis-sized any row whose start
        // offset happened to equal m-1 and read past A_pointers on the
        // true last row.
        int end = (ROW==*m-1)?(*count):A_pointers[ROW+1];
        double sum = 0;
        for(int i = start;i<end;i++)
        {
            int index = A_indices[i];
            sum += (*alpha) * A_data[i] * B[index];
        }
        C[ROW] = sum;
    }
}
}
|
517
|
#include <stdio.h>
#define N 64
#define TPB 32
// Maps index i in [0, n-1] linearly onto the unit interval [0, 1].
float scale(int i, int n) {
  float numerator = (float)i;
  return numerator / (n - 1);
}
// Absolute distance between two scalar coordinates, |x2 - x1|,
// computed as sqrt((x2-x1)^2) to match the original formulation.
__device__
float distance(float x1, float x2) {
  float d = x2 - x1;
  return sqrt(d * d);
}
// For each element of d_in, stores its distance from `ref` in d_out and
// logs the computation.  Launch with exactly one thread per element.
__global__
void distanceKernel(float *d_out, float *d_in, float ref) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  const float x = d_in[idx];
  d_out[idx] = distance(x, ref);
  printf("i = %2d: dist from %f to %f is %f.\n", idx, ref, x, d_out[idx]);
}
// Host driver: builds N scaled inputs in managed memory, computes each
// element's distance from `ref` on the GPU, then frees the buffers.
int main() {
  const float ref = 0.5f;
  // declare pointers for input and output arrays
  float *in = 0;
  float *out = 0;
  // allocate managed memory for input and output arrays
  // BUG FIX: the original allocated a single float for each buffer
  // (sizeof(float)) while the loop and kernel access N elements,
  // writing far past the allocation.
  cudaMallocManaged(&in, N*sizeof(float));
  cudaMallocManaged(&out, N*sizeof(float));
  // compute scaled input values
  for (int i = 0; i < N; ++i) {
    in[i] = scale(i, N);
  }
  // launch kernel to compute and store distance values
  distanceKernel<<<N/TPB, TPB>>>(out, in, ref);
  // wait for the kernel before the managed memory is released
  cudaDeviceSynchronize();
  // free the allocated memory
  cudaFree(in);
  cudaFree(out);
  return 0;
}
|
518
|
#include "includes.h"
// Tabulates y = f(x) on a uniform grid: each thread handles the sample
// x = frange_start + id*dx and stores the interleaved pair
// (out[2*id], out[2*id+1]) = (x, f(x)).  functionCode selects f:
// 0 -> cos, 1 -> tan, anything else -> sin.
__global__ void mapKernel(float* out, int functionCode, float frange_start, float dx) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    float x = frange_start + idx * dx;
    float y;
    if (functionCode == 0) {
        y = cos(x);
    } else if (functionCode == 1) {
        y = tan(x);
    } else {
        y = sin(x);
    }
    out[2 * idx] = x;
    out[2 * idx + 1] = y;
}
|
519
|
#include <stdio.h>
#include <cuda_runtime.h>
// Element-wise vector sum C[i] = A[i] + B[i] for every index below
// numElements; the guard covers the padded tail of the grid.
__global__ void vectorAdd(float *A, float *B, float *C, int numElements)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
// Host driver for vectorAdd: initialises two host arrays, copies them
// to the device, adds them on the GPU, copies the result back, and
// releases all memory.
// FIXES: the original stopped after allocating d_C — it never launched
// the kernel, never allocated or filled h_C, never copied the result
// back, and leaked every host and device allocation.
int main()
{
    int numElements = 1000;
    size_t size = numElements * sizeof(float);
    // Host buffers.
    float *h_A, *h_B, *h_C;
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for(int i=0; i < numElements; i++)
    {
        h_A[i] = 1;
        h_B[i] = 2;
    }
    // Device buffers.
    float * d_A = NULL;
    float * d_B = NULL;
    float * d_C = NULL;
    cudaMalloc( (void **)&d_A, size );
    cudaMalloc( (void **)&d_B, size );
    cudaMalloc( (void **)&d_C, size );
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    // Launch with a ceil-div grid so every element is covered.
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    // cudaMemcpy synchronizes with the kernel before reading d_C.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // Release every resource.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
520
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <complex.h>
#include <cufft.h>
// Kernel used for assigning values to matrix: copies the complex N x N
// matrix `a` into `a_copy`, one block per element (block indices are
// the matrix coordinates).
__global__ void assign(int N, cufftDoubleComplex* a, cufftDoubleComplex* a_copy){
	if (blockIdx.x >= N || blockIdx.y >= N) return;
	int idx = blockIdx.x + blockIdx.y*gridDim.x;
	a_copy[idx].x = a[idx].x;
	a_copy[idx].y = a[idx].y;
}
// FFT shift kernel, works in GPU: swaps the quadrants of the N x N
// matrix so the zero-frequency component moves to the centre.  Each
// destination element is taken from the diagonally opposite quadrant
// (source indices shifted by +N/2 in the low half, -N/2 in the high
// half — the same four cases the original spelled out explicitly).
__global__ void fftshift(int N, cufftDoubleComplex* a, cufftDoubleComplex* a_shift){
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int sx = (bx < N/2) ? bx + N/2 : bx - N/2;
	int sy = (by < N/2) ? by + N/2 : by - N/2;
	a_shift[bx + by*gridDim.x] = a[sx + sy*gridDim.x];
}
// Kernel used to compute 2nd derivative: multiplies each Fourier
// coefficient by -(kx^2 + ky^2), with the wavenumber origin at N/2,
// and normalises by N*N (the (2*pi/L)^2 factor is applied by the
// caller).  One block per matrix element.
//
// FIX: the original divided the real part by N twice (`/N/N`) but the
// imaginary part by the int product `/(N*N)` — inconsistent rounding,
// and N*N overflows int for N > 46340.  Both components now use the
// same double-precision-safe `/N/N` form.
__global__ void del2A(int N, cufftDoubleComplex* del2A_input, cufftDoubleComplex* del2A_output){
	if (blockIdx.x < N && blockIdx.y < N){
		int idx = blockIdx.x + blockIdx.y*gridDim.x;
		// Wavenumbers relative to the centre of the spectrum.
		double kx = (double)(blockIdx.x) + (double)(-N/2);
		double ky = (double)(blockIdx.y) + (double)(-N/2);
		double factor = -(kx*kx + ky*ky);
		del2A_output[idx].x = factor*del2A_input[idx].x/N/N;
		del2A_output[idx].y = factor*del2A_input[idx].y/N/N;
	}
}
// One fractional time substep of the complex Ginzburg-Landau update, split
// into real (.x) and imaginary (.y) parts:
//   A_new = A + (dt/div) * ( A + (1 + i c1) * lap(A) * (2*pi/L)^2
//                              - (1 - i c3) * |A|^2 * A )
// `div` selects the substep length (4, 3, 2, 1 across the four stages).
// Launched with an N x N grid of single-thread blocks.
__global__ void update(int N, double dt, double c1, double c3, int div, double L, cufftDoubleComplex* A, cufftDoubleComplex* d2A, cufftDoubleComplex* A_new){
if (blockIdx.x < N && blockIdx.y < N){
int idx = blockIdx.x + blockIdx.y * gridDim.x;
double ar = A[idx].x, ai = A[idx].y;       // field value
double dr = d2A[idx].x, di = d2A[idx].y;   // spectral Laplacian
double mag2 = ar*ar + ai*ai;               // |A|^2
double step = dt/div;
A_new[idx].x = ar + step*(ar
    + (dr - c1*di)*(2*M_PI/L)*(2*M_PI/L)
    - (ar + c3*ai)*mag2);
A_new[idx].y = ai + step*(ai
    + (di + c1*dr)*(2*M_PI/L)*(2*M_PI/L)
    - (ai - c3*ar)*mag2);
}
}
/*
 * Pseudo-spectral solver for the 2-D complex Ginzburg-Landau equation.
 * Usage: prog N c1 c3 M [seed]
 *   N    grid size (field is N x N)
 *   c1   linear dispersion coefficient
 *   c3   nonlinear dispersion coefficient
 *   M    number of time steps
 *   seed optional RNG seed (defaults to current time)
 * Each step performs four fractional substeps (dt/4, dt/3, dt/2, dt),
 * recomputing the spectral Laplacian via cuFFT before each one.
 * The real part of the field is appended to ComplexGL.out at snapshots.
 */
int main(int argc, char* argv[]){
// Start runtime clock
clock_t start_time = clock();
// Validate the argument count before dereferencing argv
// (the original crashed when fewer than four arguments were given).
if (argc < 5){
fprintf(stderr, "Usage: %s N c1 c3 M [seed]\n", argv[0]);
return 1;
}
ptrdiff_t N = atoi(argv[1]);
double c1 = atof(argv[2]);
double c3 = atof(argv[3]);
int M = atoi(argv[4]);
// Use an explicit seed when supplied so runs are reproducible
long int seed = (long int)time(NULL);
if (argc >= 6){
seed = atol(argv[5]);
}
srand48(seed);
// Physical/numerical parameters
double L = 128*M_PI;      // domain side length
int T = 10000;            // total simulated time
double dt = (double)T/M;  // time step
int interval = N/10;      // snapshot cadence; NOTE(review): tied to N, not M -- confirm intent
if (interval < 1) interval = 1;  // avoid modulo-by-zero for N < 10
// Host buffer for the solution
cufftDoubleComplex *sol = (cufftDoubleComplex*)malloc(N*N*sizeof(cufftDoubleComplex));
// Device buffers
cufftDoubleComplex* A;
cudaMalloc((void**)&A, sizeof(cufftDoubleComplex)*N*N);
cufftDoubleComplex* A_temp;
cudaMalloc((void**)&A_temp, sizeof(cufftDoubleComplex)*N*N);
cufftDoubleComplex* del2A_output;
cudaMalloc((void**)&del2A_output, sizeof(cufftDoubleComplex)*N*N);
cufftDoubleComplex* del2A_temp;
cudaMalloc((void**)&del2A_temp, sizeof(cufftDoubleComplex)*N*N);
cufftDoubleComplex* del2A_shift;
cudaMalloc((void**)&del2A_shift, sizeof(cufftDoubleComplex)*N*N);
// Output file
FILE *fileid = fopen("ComplexGL.out", "w");
// Initial conditions: uniform noise in [-1.5, 1.5) for BOTH components.
// (The original assigned .y twice and left .x uninitialized.)
for (int i = 0; i < N; ++i){
for (int j = 0; j < N; ++j){
sol[i*N + j].x = (6*drand48() - 3)/2;
sol[i*N + j].y = (6*drand48() - 3)/2;
}
}
// Copy the initial field to the device
cudaMemcpy(A, sol, N*N*sizeof(cufftDoubleComplex), cudaMemcpyHostToDevice);
dim3 meshDim(N,N);
cufftHandle plan;
cufftPlan2d(&plan, N, N, CUFFT_Z2Z);
// Time stepping: four Laplacian/update substeps per iteration.
for (int n = 0; n < M; ++n){
// Substep 1: Laplacian of A, then dt/4 update into A_temp
assign<<<meshDim,1>>>(N, A, del2A_output);
cufftExecZ2Z(plan, del2A_output, del2A_temp, CUFFT_FORWARD);
fftshift<<<meshDim,1>>>(N, del2A_temp, del2A_shift);
del2A<<<meshDim,1>>>(N, del2A_shift, del2A_output);
fftshift<<<meshDim,1>>>(N, del2A_output, del2A_shift);
cufftExecZ2Z(plan, del2A_shift, del2A_output, CUFFT_INVERSE);
update<<<meshDim,1>>>(N, dt, c1, c3, 4, L, A, del2A_output, A_temp);
// Substep 2: dt/3 update back into A
assign<<<meshDim,1>>>(N, A_temp, del2A_output);
cufftExecZ2Z(plan, del2A_output, del2A_temp, CUFFT_FORWARD);
fftshift<<<meshDim,1>>>(N, del2A_temp, del2A_shift);
del2A<<<meshDim,1>>>(N, del2A_shift, del2A_output);
fftshift<<<meshDim,1>>>(N, del2A_output, del2A_shift);
cufftExecZ2Z(plan, del2A_shift, del2A_output, CUFFT_INVERSE);
update<<<meshDim,1>>>(N, dt, c1, c3, 3, L, A_temp, del2A_output, A);
// Substep 3: dt/2 update into A_temp
assign<<<meshDim,1>>>(N, A, del2A_output);
cufftExecZ2Z(plan, del2A_output, del2A_temp, CUFFT_FORWARD);
fftshift<<<meshDim,1>>>(N, del2A_temp, del2A_shift);
del2A<<<meshDim,1>>>(N, del2A_shift, del2A_output);
fftshift<<<meshDim,1>>>(N, del2A_output, del2A_shift);
cufftExecZ2Z(plan, del2A_shift, del2A_output, CUFFT_INVERSE);
update<<<meshDim,1>>>(N, dt, c1, c3, 2, L, A, del2A_output, A_temp);
// Substep 4: full dt update back into A
assign<<<meshDim,1>>>(N, A_temp, del2A_output);
cufftExecZ2Z(plan, del2A_output, del2A_temp, CUFFT_FORWARD);
fftshift<<<meshDim,1>>>(N, del2A_temp, del2A_shift);
del2A<<<meshDim,1>>>(N, del2A_shift, del2A_output);
fftshift<<<meshDim,1>>>(N, del2A_output, del2A_shift);
cufftExecZ2Z(plan, del2A_shift, del2A_output, CUFFT_INVERSE);
update<<<meshDim,1>>>(N, dt, c1, c3, 1, L, A_temp, del2A_output, A);
// Periodically snapshot the real part of the field.
if ((n+1)%interval == 0){
cudaDeviceSynchronize();  // cudaThreadSynchronize is deprecated
cudaMemcpy(sol, A, N*N*sizeof(cufftDoubleComplex), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i){
for (int j = 0; j < N; ++j){
fwrite(&(sol[i*N+j].x), sizeof(double), 1, fileid);
}
}
}
}
// Final copy back to the host
cudaDeviceSynchronize();
cudaMemcpy(sol, A, N*N*sizeof(cufftDoubleComplex), cudaMemcpyDeviceToHost);
// Close the output file (the original leaked the handle).
fclose(fileid);
// Release all resources
free(sol);
cudaFree(A);
cudaFree(A_temp);
cudaFree(del2A_output);
cudaFree(del2A_temp);
cudaFree(del2A_shift);
cufftDestroy(plan);
// Report wall time
clock_t end_time = clock();
printf("Runtime:%g s.\n", (float)(end_time - start_time)/CLOCKS_PER_SEC);
return 0;
}
|
521
|
#include <iostream>
#include <cstdio>
using namespace std;
#include <cuda_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fills data[0..n) with pseudo-random floats in [0, 1]
// (uses the C library rand(); seed with srand() for reproducibility).
void RandomInit(float* data, int n)
{
    for (float* p = data; p != data + n; ++p)
    {
        *p = rand() / (float)RAND_MAX;
    }
}
// Fills data[0..n) with pseudo-random unsigned integers in [0, n).
void RandomInit(unsigned* data, int n)
{
    int k = 0;
    while (k < n)
    {
        data[k] = rand() % n;
        ++k;
    }
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Reports a CUDA runtime error with call-site context and terminates.
// Invoked through the checkCudaErrors() macro, which supplies file/line.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks the sticky last-error state (e.g. right after a kernel launch)
// and aborts with a labeled message if an error is pending.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess == status)
        return;
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
            file, line, errorMessage, (int)status, cudaGetErrorString( status ) );
    exit(-1);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Microbenchmark kernel: copies A[tid*stride] to C[tid*stride].
// Larger strides spread a warp's accesses over more memory segments
// (used to study coalescing/L1 behavior, not to compute anything).
__global__ void l1_stride_cons(const float* A, float* C, int stride)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int pos = tid * stride;
    C[pos] = A[pos];
}
// Microbenchmark kernel: every group of `stride` consecutive threads
// touches the first `stride` slots of a 32-element row
// (index = (i/stride)*32 + i%stride).
__global__ void l1_stride(const float* A, float* C, int stride)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int row = i / stride;
    int lane = i % stride;
    int pos = row * 32 + lane;
    C[pos] = A[pos];
}
// Host code
// Strided-copy microbenchmark (the name is historical: no addition is
// performed -- the kernel does C[i*stride] = A[i*stride]).
// N: logical element count; threadsPerBlock: block size; stride: access stride.
// Reports the average kernel time over TIMES launches and a throughput figure.
void VectorAddition(int N, int threadsPerBlock, int stride)
{
    cout<<"Vector Addition for input size "<<N<<" :\n";
    // Variables
    float* h_A;
    float* h_C;
    float* d_A = NULL;
    float* d_C = NULL;
    float total_time = 0;
    // 32x head-room so strided accesses up to index*stride stay in bounds.
    size_t size = N * sizeof(float) * 32;
    // Allocate and initialize host buffers
    h_A = (float*)malloc(size);
    h_C = (float*)malloc(size);
    RandomInit(h_A, N);
    // Allocate device buffers and upload the input
    checkCudaErrors( cudaMalloc((void**)&d_A, size) );
    checkCudaErrors( cudaMalloc((void**)&d_C, size) );
    checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
    checkCudaErrors(cudaDeviceSynchronize());
    // Invoke kernel
    cout<<"Invoke Kernel\n";
    int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock);
    // Time TIMES launches with CUDA events. The original never accumulated
    // total_time (and launched only once), so the reported time was 0 and
    // the gflops figure divided by zero.
    cudaEvent_t ev_start, ev_stop;
    checkCudaErrors(cudaEventCreate(&ev_start));
    checkCudaErrors(cudaEventCreate(&ev_stop));
    for (int i = 0; i < TIMES; i++) {
        checkCudaErrors(cudaEventRecord(ev_start, 0));
        l1_stride_cons<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, stride);
        getLastCudaError("kernel launch failure");
        checkCudaErrors(cudaEventRecord(ev_stop, 0));
        checkCudaErrors(cudaEventSynchronize(ev_stop));
        float ms = 0;
        checkCudaErrors(cudaEventElapsedTime(&ms, ev_start, ev_stop));
        total_time += ms;
    }
    checkCudaErrors(cudaEventDestroy(ev_start));
    checkCudaErrors(cudaEventDestroy(ev_stop));
    float dSeconds = total_time/((float)TIMES * 1000);
    float dNumOps = N;
    float gflops = 1.0e-9 * dNumOps/dSeconds;
    cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
    // Copy result from device memory to host memory
    checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
    // NOTE(review): this check only matches the stride == 1 access pattern;
    // for other strides unwritten h_C slots hold garbage -- confirm intent.
    int i;
    for (i = 0; i < N; ++i) {
        float sum = h_A[i];
        if (fabs(h_C[i] - sum) > 1e-5)
            break;
    }
    // Free device memory
    if (d_A)
        cudaFree(d_A);
    if (d_C)
        cudaFree(d_C);
    // Free host memory
    if (h_A)
        free(h_A);
    if (h_C)
        free(h_C);
    cudaDeviceReset();
    if(i == N)
        cout<<"SUCCSESS"<<endl;
    else
        cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
// Entry point: expects N, threadsPerBlock and stride on the command line.
int main(int argc,char *argv[])
{
    if(argc >= 4)
    {
        VectorAddition(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
    }
    else
        printf("Unsuffcient number of arguments!\n");
}
|
522
|
/**
 * Naive square matrix multiplication on the GPU: C = A * B,
 * one output element per thread.
 */
#include <stdio.h>
#include <cuda_runtime.h>
// SIZE is defined to be multiple of the number of threads
#define SIZE 4
#define THREADS_PER_BLOCK 4
// Naive matrix multiply: C[row][col] = sum_k A[row][k] * B[k][col] for a
// size x size row-major matrix, one output element per thread.
// The original used blockDim.x in place of `size` (correct only when the
// block width equals the matrix size) and accumulated into uninitialized
// device memory with +=; this version derives row/col from `size`, guards
// the tail, and writes the finished sum.
__global__ void mat_mul( int *A, int *B, int *C, int size)
{
    int thrIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (thrIdx < size * size)
    {
        int row = thrIdx / size;
        int col = thrIdx % size;
        int acc = 0;
        for (int k = 0; k < size; k++)
            acc += A[row * size + k] * B[k * size + col];
        C[thrIdx] = acc;   // overwrite: no dependence on prior C contents
    }
}
// Driver: builds two SIZE x SIZE integer matrices, multiplies them on the
// GPU and prints inputs and result.
int main( int argc, char * argv[])
{
    int i, j;
    int size_in_bytes = SIZE * SIZE * sizeof(int);
    int *dev_A;
    int *dev_B;
    int *dev_C;
    // Host matrices (flattened, row-major)
    int *host_A = (int *) malloc( size_in_bytes );
    int *host_B = (int *) malloc( size_in_bytes );
    int *host_C = (int *) malloc( size_in_bytes );
    // Initialize inputs; C starts at zero.
    for( i = 0; i < SIZE * SIZE; i++ ){
        host_A[ i ] = i;
        host_B[ i ] = i;
        host_C[ i ] = 0;
    }
    // Print A, B and the (still zero) C
    for( i = 0; i < SIZE; i++ )
    {
        for( j = 0; j < SIZE; j++ )
            printf("%d \t", host_A[ i*SIZE + j ]);
        printf("\n");
    }
    for( i = 0; i < SIZE; i++ )
    {
        for( j = 0; j < SIZE; j++ )
            printf("%d \t", host_B[ i*SIZE + j ]);
        printf("\n");
    }
    for( i = 0; i < SIZE; i++ )
    {
        for( j = 0; j < SIZE; j++ )
            printf("%d \t", host_C[ i*SIZE + j ]);
        printf("\n");
    }
    cudaMalloc((void **)&dev_A, size_in_bytes);
    cudaMalloc((void **)&dev_B, size_in_bytes);
    cudaMalloc((void **)&dev_C, size_in_bytes);
    // Copy inputs to the device. dev_C is seeded with the zeroed host_C
    // because the kernel accumulates into C (the original left dev_C
    // uninitialized, making the result garbage).
    cudaMemcpy(dev_A, host_A, size_in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, host_B, size_in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_C, host_C, size_in_bytes, cudaMemcpyHostToDevice);
    int threadsPerBlock = THREADS_PER_BLOCK;
    int blocksPerGrid =( SIZE * SIZE ) / threadsPerBlock;
    mat_mul<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_B, dev_C, SIZE);
    // Blocking copy also synchronizes with the kernel before reading.
    cudaMemcpy(host_C, dev_C, size_in_bytes, cudaMemcpyDeviceToHost);
    for( i = 0; i < SIZE; i++ )
    {
        for( j = 0; j < SIZE; j++ )
            printf("%d \t", host_C[ i*SIZE + j ]);
        printf("\n");
    }
    printf("-------------------------\n\n");
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    // Free host memory
    free(host_A);
    free(host_B);
    free(host_C);
    return 0;
}
|
523
|
#include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
// check for errors using cuda runtime api
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
// Error: GPUassert: unknown error vecadd.cu 45
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with file/line context; exits with the error code
// unless `abort` is false. Used through the gpuErrchk() macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort) exit(code);
}
// CPU reference implementation: h_C[i] = h_A[i] + h_B[i] for i in [0, n).
void vecAdd1(float *h_A, float* h_B, float* h_C, int n) {
    int k = 0;
    while (k < n) {
        h_C[k] = h_A[k] + h_B[k];
        ++k;
    }
}
// One thread per element: C[i] = A[i] + B[i], with a guard for the
// ragged tail when n is not a multiple of the block size.
__global__
void vecAddKernel(float* A, float* B, float* C, int n) {
    int gid = blockDim.x*blockIdx.x + threadIdx.x;
    if (gid >= n)
        return;
    C[gid] = A[gid] + B[gid];
}
// GPU vector add: C = A + B for n floats. A, B, C are host pointers;
// device buffers are allocated, used and released inside this call.
void vecAdd2(float* A, float* B, float* C, int n) {
    size_t size = (size_t)n * sizeof(float);  // size_t avoids int overflow
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_C, size);
    // Integer ceil-division replaces the double-precision ceil() call.
    vecAddKernel <<< (n + 255) / 256, 256 >>> (d_A, d_B, d_C, n);
    // Kernel launches are asynchronous and report configuration errors
    // only via cudaGetLastError(); the original silently ignored them.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "vecAdd2 kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Driver: adds two constant vectors on the GPU and prints the result
// (expected value: 30.000000 for every element).
int main() {
    const int n = 10;
    float* h_A = (float*)malloc(n * sizeof(float));
    float* h_B = (float*)malloc(n * sizeof(float));
    float* h_C = (float*)malloc(n * sizeof(float));
    for (int k = 0; k < n; ++k) {
        h_A[k] = 10.0;
        h_B[k] = 20.0;
        h_C[k] = 0.0;
    }
    vecAdd2(h_A, h_B, h_C, n);
    for (int k = 0; k < n; ++k) {
        printf("%f ", h_C[k]);
    }
    printf("\n");
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
524
|
#include <stdio.h>
#include <stdlib.h>
#define DIM 1000
#define CUDA_CHECK( err ) (cuda_checker(err, __FILE__, __LINE__))
// Prints a readable CUDA error with its location and terminates.
// Used via the CUDA_CHECK() macro, which injects __FILE__/__LINE__.
static void cuda_checker( cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
// Minimal complex number usable on both host and device.
// r: real part, i: imaginary part.
struct cppComplex {
    float r;
    float i;
    __host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
    // Squared modulus |z|^2 = r^2 + i^2 (no sqrt needed for escape tests).
    __host__ __device__ float magnitude2( void ) {
        return r * r + i * i;
    }
    // Complex product (r1 + i1*i)(r2 + i2*i).
    __host__ __device__ cppComplex operator*( const cppComplex& a ) {
        float re = r * a.r - i * a.i;
        float im = i * a.r + r * a.i;
        return cppComplex(re, im);
    }
    // Component-wise sum.
    __host__ __device__ cppComplex operator+( const cppComplex& a ) {
        return cppComplex(r + a.r, i + a.i);
    }
};
// Julia-set membership test for z -> z^2 + c with c = -0.8 + 0.156i.
// Pixel (x, y) is mapped to the complex plane, scaled and centered on
// (DIM/2, DIM/2). Returns 1 if the orbit stays bounded (|z|^2 <= 1000)
// for 200 iterations, 0 once it escapes.
__host__ __device__ int julia( int x, int y ) {
    const float scale = 1.5;
    float re = scale * (float)(DIM / 2 - x) / (DIM / 2);
    float im = scale * (float)(DIM / 2 - y) / (DIM / 2);
    cppComplex c(-0.8, 0.156);
    cppComplex z(re, im);
    for (int step = 0; step < 200; ++step) {
        z = z * z + c;
        if (z.magnitude2() > 1000)
            return 0;   // escaped: not in the set
    }
    return 1;           // still bounded after 200 iterations
}
// Renders the Julia set on the CPU and writes it to julia_cpu.ppm
// (binary P6 format; set membership drawn in the red channel).
void julia_set_cpu() {
    unsigned char *img = new unsigned char[DIM * DIM];
    for (int col = 0; col < DIM; ++col)
        for (int row = 0; row < DIM; ++row)
            img[col + row * DIM] = 255 * julia(col, row);
    FILE *out = fopen("julia_cpu.ppm", "wb");
    fprintf(out, "P6\n%i %i 255\n", DIM, DIM);
    for (int row = 0; row < DIM; ++row) {
        for (int col = 0; col < DIM; ++col) {
            fputc(img[row * DIM + col], out);
            fputc(0, out);
            fputc(0, out);
        }
    }
    fclose(out);
    delete [] img;
}
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
/*Begin the GPU part*/
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
// Grid-stride loop over all DIM*DIM pixels: each thread shades pixels
// tid, tid + gridSize, ... so any launch configuration covers the image.
__global__ void kernel( unsigned char *ptr ) {
    int step = blockDim.x * gridDim.x;
    for (int tid = threadIdx.x + blockIdx.x * blockDim.x;
         tid < DIM * DIM;
         tid += step) {
        ptr[tid] = 255 * julia(tid % DIM, tid / DIM);
    }
}
// Renders the Julia set on the GPU and writes it to julia_gpu.ppm
// (binary P6; set membership drawn in the red channel).
void julia_set_gpu() {
    unsigned char *pixels = new unsigned char[DIM * DIM]; // host buffer
    unsigned char *dev_bitmap;                            // device buffer
    CUDA_CHECK(cudaMalloc((void**)&dev_bitmap, DIM * DIM * sizeof(unsigned char)));
    // No host->device copy needed: the kernel overwrites every byte
    // (the original copied an uninitialized host buffer up for nothing).
    int threads_per_block = 256;  // multiple of the warp size (was 3)
    int blocks_per_grid = (DIM * DIM + threads_per_block - 1) / threads_per_block;
    kernel<<<blocks_per_grid, threads_per_block>>>(dev_bitmap);
    CUDA_CHECK(cudaGetLastError());  // surface launch-configuration errors
    CUDA_CHECK(cudaMemcpy(pixels, dev_bitmap, DIM * DIM * sizeof(unsigned char), cudaMemcpyDeviceToHost));
    // Write the image; skip quietly if the file cannot be created.
    FILE *f = fopen("julia_gpu.ppm", "wb");
    if (f != NULL) {
        fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
        for (int y = 0; y < DIM; y++) {
            for (int x = 0; x < DIM; x++) {
                fputc(pixels[(y * DIM + x)], f); // 0 .. 255
                fputc(0, f);
                fputc(0, f);
            }
        }
        fclose(f);
    }
    // free memory
    CUDA_CHECK(cudaFree(dev_bitmap));
    delete [] pixels;
}
// Times the CPU and GPU renderers with CUDA events and prints both.
int main( void ) {
    float time;
    cudaEvent_t start, stop;
    // Create the events once and reuse them for both measurements; the
    // original created a second pair over the same handles, leaking the
    // first, and never destroyed any of them.
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    // record cpu execution time
    CUDA_CHECK(cudaEventRecord(start, 0));
    julia_set_cpu();
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
    printf("Time to generate using CPU: %3.1f ms \n", time);
    // record gpu execution time
    CUDA_CHECK(cudaEventRecord(start, 0));
    julia_set_gpu();
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
    printf("Time to generate using GPU: %3.1f ms \n", time);
    // release the events, then flush buffers / tear down the context
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    cudaDeviceReset();
}
|
525
|
#include "includes.h"
// Transposes an nx-by-ny row-major matrix: out[ix][iy] = in[iy][ix]
// (out is ny elements wide). Block indices are remapped along diagonals
// before computing element coordinates -- presumably the "diagonal
// coordinate" transpose idiom used to spread consecutive blocks across
// DRAM partitions; confirm against the launch configuration, which must
// tile the full nx x ny domain.
__global__ void transposeDiagonalRow(float *out, float *in, const int nx, const int ny)
{
    // Diagonal remap: blk_y is the diagonal index, blk_x walks along the
    // diagonal modulo gridDim.x. Note gridDim.y is not used here.
    unsigned int blk_y = blockIdx.x;
    unsigned int blk_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    // Global element coordinates derived from the remapped block.
    unsigned int ix = blockDim.x * blk_x + threadIdx.x;
    unsigned int iy = blockDim.y * blk_y + threadIdx.y;
    if (ix < nx && iy < ny)
    {
        // Read is contiguous along a row of `in`; write is strided in `out`.
        out[ix * ny + iy] = in[iy * nx + ix];
    }
}
|
526
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/inner_product.h>
#include <thrust/for_each.h>
#include <cstdlib>
#include <vector>
typedef double ScalarType;
// Builds the element-wise sum (v1+v2 = 3) and difference (v1-v2 = -1) of
// two constant vectors on the host, then computes their dot product on
// the device with thrust::inner_product and prints all three values.
int main(void) {
    const int n = 100;
    std::vector<double> ones(n, 1);
    std::vector<double> twos(n, 2);
    std::vector<double> sums(n, 1);
    std::vector<double> diffs(n, 2);
    for (int k = 0; k < n; ++k)
    {
        sums[k] = ones[k] + twos[k];
        diffs[k] = ones[k] - twos[k];
    }
    std::cout << "v1pv2: " << sums[0] << std::endl;
    std::cout << "v1mv2: " << diffs[0] << std::endl;
    // Stage through host vectors, then move the data to the device.
    thrust::host_vector<double> h_sum = sums;
    thrust::host_vector<double> h_diff = diffs;
    thrust::device_vector<ScalarType> d_sum = h_sum;
    thrust::device_vector<ScalarType> d_diff = h_diff;
    // Device-side dot product with an initial value of zero.
    ScalarType init = 0;
    ScalarType dot = thrust::inner_product(d_sum.begin(), d_sum.end(), d_diff.begin(), init);
    std::cout << "host dot: " << dot << std::endl;
    return 0;
}
|
527
|
#include "includes.h"
// Scales each of the `size` floats in input_output by `multipler`, in place.
// One thread per element; extra threads in the last block are masked off.
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
    // size_t index avoids the signed/unsigned comparison and int overflow
    // the original int index had when size exceeds INT_MAX.
    size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) input_output[idx] = input_output[idx] * multipler;
}
|
528
|
/* simple-warp-divergence.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
/* Checks a CUDA runtime call and aborts with file/line context on failure.
   Wrapped in do { ... } while (0) so the macro expands to a single
   statement and nests safely inside unbraced if/else (the original's bare
   braces broke `if (x) CHECK_CUDA_CALL(...); else ...`). */
#define CHECK_CUDA_CALL(call)                                       \
    do {                                                            \
        const cudaError_t error = call;                             \
                                                                    \
        if (error != cudaSuccess) {                                 \
            fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n",\
                    __FILE__, __LINE__,                             \
                    error, cudaGetErrorString(error));              \
            exit(EXIT_FAILURE);                                     \
        }                                                           \
    } while (0)
// Dummy kernel that absorbs one-time CUDA startup cost before timing.
// No bounds check: the launch must exactly cover the allocation.
__global__ void warmUp(float* c)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = 0.0f;
}
// Demonstrates intra-warp divergence: the branch condition alternates on
// every thread id, so each warp must execute both paths (with masking).
__global__ void warpDivergence(float* c)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float a = 0.0f;
    float b = 0.0f;
    // Even/odd threads take different branches -> divergence inside a warp.
    if (id % 2 == 0)
        a = 100.0f;
    else
        b = 200.0f;
    c[id] = a + b;  // no bounds check: launch must cover the allocation
}
// Same computation as warpDivergence, but the branch condition is constant
// across each warp (granularity = warpSize), so no intra-warp divergence.
__global__ void noWarpDivergence(float* c)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float a = 0.0f;
    float b = 0.0f;
    // Whole warps alternate between the two paths.
    if ((id / warpSize) % 2 == 0)
        a = 100.0f;
    else
        b = 200.0f;
    c[id] = a + b;  // no bounds check: launch must cover the allocation
}
// Same computation as warpDivergence, expressed as two independent ifs on
// a precomputed boolean; short single-assignment bodies like these are
// candidates for branch predication by the compiler.
__global__ void warpDivergencePredicate(float* c)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float a = 0.0f;
    float b = 0.0f;
    bool pred = (id % 2 == 0);
    if (pred)
        a = 100.0f;
    if (!pred)
        b = 200.0f;
    c[id] = a + b;  // no bounds check: launch must cover the allocation
}
/* Converts a timeval into seconds expressed as a double. */
static double toSeconds(const struct timeval* tv)
{
    return (double)tv->tv_sec + (double)tv->tv_usec * 1.0e-6;
}
/*
 * Runs and times the four kernels above with wall-clock timers.
 * argv[1]: block size (default 64); argv[2]: data size (default 64).
 */
int main(int argc, char** argv)
{
    int dev;
    cudaDeviceProp deviceProp;
    int size;
    int blockSize;
    size_t numOfBytes;
    float* devC;
    struct timeval startTime;
    struct timeval endTime;
    /* Setup device */
    dev = 0;
    CHECK_CUDA_CALL(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using device %d: %s\n", dev, deviceProp.name);
    /* Set data size */
    if (argc > 1)
        blockSize = atoi(argv[1]);
    else
        blockSize = 64;
    if (argc > 2)
        size = atoi(argv[2]);
    else
        size = 64;
    printf("Data size: %d, Block size: %d\n", size, blockSize);
    /* Set execution configuration */
    dim3 block(blockSize, 1);
    dim3 grid((size + block.x - 1) / block.x, 1);
    printf("Execution configuration: <<<(%d, %d), (%d, %d)>>>\n",
        grid.x, grid.y, block.x, block.y);
    /* The kernels have no bounds check and every launched thread writes
       c[id]; size the buffer for the full rounded-up grid (the original
       allocated only `size` floats, so the last block could write out of
       bounds whenever size was not a multiple of blockSize). */
    numOfBytes = (size_t)grid.x * block.x * sizeof(float);
    CHECK_CUDA_CALL(cudaMalloc((float**)&devC, numOfBytes));
    CHECK_CUDA_CALL(cudaDeviceSynchronize());
    /* Call kernel for warming up */
    gettimeofday(&startTime, NULL);
    warmUp<<<grid, block>>>(devC);
    CHECK_CUDA_CALL(cudaDeviceSynchronize());
    gettimeofday(&endTime, NULL);
    printf("Warmup execution time: %.6f\n",
        toSeconds(&endTime) - toSeconds(&startTime));
    /* Check kernel error */
    CHECK_CUDA_CALL(cudaGetLastError());
    /* Call kernel that causes warp divergence */
    gettimeofday(&startTime, NULL);
    warpDivergence<<<grid, block>>>(devC);
    CHECK_CUDA_CALL(cudaDeviceSynchronize());
    gettimeofday(&endTime, NULL);
    printf("WarpDivergence execution time: %.6f\n",
        toSeconds(&endTime) - toSeconds(&startTime));
    /* Check kernel error */
    CHECK_CUDA_CALL(cudaGetLastError());
    /* Call kernel that does not cause warp divergence */
    gettimeofday(&startTime, NULL);
    noWarpDivergence<<<grid, block>>>(devC);
    CHECK_CUDA_CALL(cudaDeviceSynchronize());
    gettimeofday(&endTime, NULL);
    printf("NoWarpDivergence execution time: %.6f\n",
        toSeconds(&endTime) - toSeconds(&startTime));
    /* Check kernel error */
    CHECK_CUDA_CALL(cudaGetLastError());
    /* Call kernel that uses predicates */
    gettimeofday(&startTime, NULL);
    warpDivergencePredicate<<<grid, block>>>(devC);
    CHECK_CUDA_CALL(cudaDeviceSynchronize());
    gettimeofday(&endTime, NULL);
    printf("WarpDivergencePredicate execution time: %.6f\n",
        toSeconds(&endTime) - toSeconds(&startTime));
    /* Check kernel error */
    CHECK_CUDA_CALL(cudaGetLastError());
    /* Free device memory */
    CHECK_CUDA_CALL(cudaFree(devC));
    /* Reset device */
    CHECK_CUDA_CALL(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
529
|
/*
*
* Programa de Introducci�n a los conceptos de CUDA
* Suma dos vectores de enteros e indica qu� partes del c�digo deben modificarse
* para implementar la versi�n paralela en el GPU
*
* Asume un modelo de memoria distribuida
*/
/* Parte 0: A�adir los archivos .h de CUDA*/
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
/* Numero de elementos en el vector */
#define ARRAY_SIZE 256 * 1024
/*
* N�mero de bloques e hilos
* Su producto siempre debe ser el tama�o del vector (arreglo).
*/
#define NUM_BLOCKS 256
#define THREADS_PER_BLOCK 1024
/* Declaraci�n de m�todos/
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
/* Sequential reference: c[i] = a[i] + b[i] over all ARRAY_SIZE elements. */
void vect_add_c(int* a, int* b, int* c) {
    printf("Ejecuci�n secuencial \n");
    int i = 0;
    while (i < ARRAY_SIZE) {
        c[i] = a[i] + b[i];
        i++;
    }
}
/* Single-block kernel: thread i adds element i. Only valid when launched
   with one block whose thread count equals the vector length. */
__global__ void vect_add(int* d_a, int* d_b, int* d_c)
{
    int pos = threadIdx.x;
    d_c[pos] = d_a[pos] + d_b[pos];
}
/* Multi-block kernel: global index plus a guard for the ragged tail. */
__global__ void vect_add_multiblock(int* d_a, int* d_b, int* d_c)
{
    int gid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (gid >= ARRAY_SIZE)
        return;
    d_c[gid] = d_a[gid] + d_b[gid];
}
/* Main routine: allocates host/device arrays, runs the multi-block vector
   add on the GPU, times it, and prints samples of the result. */
int main(int argc, char* argv[])
{
    int* h_a, * h_b, * h_c; /* host arrays */
    int* d_a, * d_b, * d_c; /* device arrays */
    int i;
    size_t sz = ARRAY_SIZE * sizeof(int);
    /* Allocate host memory */
    h_a = (int*)malloc(sz);
    h_b = (int*)malloc(sz);
    h_c = (int*)malloc(sz);
    /* Part 1A: allocate device memory */
    cudaMalloc(&d_a, sz);
    cudaMalloc(&d_b, sz);
    cudaMalloc(&d_c, sz);
    /* Initialization */
    for (i = 0; i < ARRAY_SIZE; i++) {
        h_a[i] = i;
        h_b[i] = i + 10;
        h_c[i] = 0;
    }
    /* Part 1B: copy the input vectors to the device */
    cudaMemcpy(d_a, h_a, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, sz, cudaMemcpyHostToDevice);
    /* Part 2A: configure and launch the kernel */
    dim3 dimGrid(NUM_BLOCKS);
    dim3 dimBlock(THREADS_PER_BLOCK);
    const clock_t begin_time = clock();
    vect_add_multiblock<<<dimGrid,dimBlock>>>(d_a, d_b, d_c);
    /* Kernel launches are asynchronous: wait for completion so the measured
       time covers execution, not just the launch (the original timed only
       the launch). */
    cudaDeviceSynchronize();
    printf("Tiempo de ejecuci�n: %f \n", float(clock() - begin_time) / CLOCKS_PER_SEC);
    /* Check for launch/execution errors */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "No se pudo lanzar el kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Part 1C: copy the result back to the host */
    cudaMemcpy(h_c, d_c, sz, cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");
    printf("Algunos resultados: ");
    for (i = 0; i < 10; i++) {
        printf("%d, ", h_c[i]);
    }
    printf("\n\n");
    for (i = ARRAY_SIZE - 10; i < ARRAY_SIZE; i++) {
        printf("%d, ", h_c[i]);
    }
    printf("\n\n");
    /* Part 1D: free device and host memory */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
/* Aborts with a labeled message if a CUDA error is pending. */
void checkCUDAError(const char* msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
|
530
|
#include <stdio.h>
#define T 8 // As Threads
#define N 16
// Transposes an N x N int matrix: B = A^T, one element per thread.
// x/y are global column/row indices; `width` assumes the grid spans the
// whole matrix (gridDim.x * T == N).
__global__ void vecMatrixTransposed(int *A, int *B)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y+ threadIdx.y;
    int width = gridDim.x * T;
    // NOTE(review): with T=8 and N=16 this loop executes exactly once
    // (j=0). It looks like a vestige of the tiled-transpose idiom where
    // the step would be the per-block row count -- confirm before changing.
    for( int j = 0; j<T; j+=N )
    {
        B[x*width + (j+y)] = A[(y+j)*width + x];
    }
}
// Builds an N x N matrix, prints it (single line), transposes it on the
// GPU, and prints the transposed result N values per row.
int main (int argc, char *argv[])
{
    int host_out[N*N];     // transposed result copied back from the GPU
    int host_in[N][N];     // source matrix
    int bytes = N*N *sizeof(int);
    int *dev_in, *dev_out;
    // Fill and echo the source matrix (all values on one line, matching
    // the original output format).
    for (int r = 0; r < N; r++)
    {
        for (int c = 0; c < N; c++)
        {
            host_in[r][c] = ((r*r) +1) * (c+1);
            printf("%5d ", host_in[r][c]);
        }
    }
    printf("\n");
    cudaMalloc( (void**)&dev_in, bytes);
    cudaMalloc( (void**)&dev_out, bytes);
    cudaMemcpy( dev_in, host_in, bytes, cudaMemcpyHostToDevice);
    dim3 block(T,T);
    dim3 grid((N + block.x - 1)/ block.x ,(N + block.y - 1) / block.y);
    vecMatrixTransposed<<<grid,block>>>(dev_in, dev_out);
    cudaMemcpy(host_out, dev_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_in);
    cudaFree(dev_out);
    printf("Result\n");
    // Print the result N values per row.
    for (int k = 0; k < N * N; k++)
    {
        printf("%3d ", host_out[k]);
        if ((k + 1) % N == 0)
            printf("\n");
    }
    printf("\n");
    return 0;
}
|
531
|
#include <stdio.h>
#include <stdlib.h>
#define INPUT_SIZE 8
#define MASK_SIZE 4
#define RES_SIZE (MASK_SIZE+INPUT_SIZE-1)
#define THREAD_PER_BLOCK 2
#define N_BLOCKS (RES_SIZE+THREAD_PER_BLOCK-1)/THREAD_PER_BLOCK
/*
Debug runtime API function
*/
/* Checks the cudaError_t returned by `call` and aborts with file/line
   context on failure. Wrapped in do { ... } while(0) so it behaves as one
   statement inside if/else; the original also left a trailing backslash on
   its last line, splicing the following source line into the macro. */
#define CHECK(call) do {\
    const cudaError_t error=call;\
    if( error!= cudaSuccess ){\
        printf("Error %s %d, ",__FILE__,__LINE__);\
        printf("code: %d, reason: %s\n",error,cudaGetErrorString(error));\
        exit(1);\
    }\
} while(0)
/*
 Direct 1-D convolution: res[n] = sum_j in[n-j] * mask[j], one output
 element per thread, with the input treated as zero outside [0, INPUT_SIZE).
*/
__global__ void convolution(int *in,int *mask,int *res){
    int out_idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (out_idx >= RES_SIZE)
        return;
    int sum = 0;
    for (int j = 0; j < MASK_SIZE; j++) {
        int src = out_idx - j;
        if (src >= 0 && src < INPUT_SIZE)
            sum += in[src] * mask[j];
    }
    res[out_idx] = sum;
}
// Driver: convolves an all-ones input with an all-ones mask on the GPU
// and prints the inputs, configuration and result.
int main(){
    /* Host buffers for the input signal, the mask and the result */
    int *input=(int*)malloc(INPUT_SIZE*sizeof(int)),
        *mask=(int*)malloc(MASK_SIZE*sizeof(int)),
        *res=(int*)malloc(RES_SIZE*sizeof(int));
    int i;
    /* Device buffer pointers */
    int *gpu_in,*gpu_mask,*gpu_res;
    /* Allocate device memory */
    cudaMalloc((void**)&gpu_in,INPUT_SIZE*sizeof(int));
    cudaMalloc((void**)&gpu_mask,MASK_SIZE*sizeof(int));
    cudaMalloc((void**)&gpu_res,RES_SIZE*sizeof(int));
    /* Fill the inputs with ones so the expected result is easy to verify */
    for(i=0;i<INPUT_SIZE;i++){input[i]=1;}
    for(i=0;i<MASK_SIZE;i++){mask[i]=1;}
    /* Echo the inputs */
    printf("Input array\n");
    for(i=0;i<INPUT_SIZE;i++){printf("%d ",input[i]);}
    printf("\n");
    printf("Convolution mask\n");
    for(i=0;i<MASK_SIZE;i++){printf("%d ",mask[i]);}
    printf("\n");
    /* Copy inputs to the device */
    cudaMemcpy(gpu_in,input,INPUT_SIZE*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_mask,mask,MASK_SIZE*sizeof(int),cudaMemcpyHostToDevice);
    /* Launch the kernel and wait for completion */
    convolution<<<N_BLOCKS,THREAD_PER_BLOCK>>>(gpu_in,gpu_mask,gpu_res);
    cudaDeviceSynchronize();
    /* Report sizes and copy the result back to the host */
    printf("Convolution mask: %d\n",MASK_SIZE);
    printf("Input array size: %d\n",INPUT_SIZE);
    printf("Result array size: %d\n",RES_SIZE);
    printf("Number of blocks: %d\n",N_BLOCKS);
    cudaMemcpy(res,gpu_res,RES_SIZE*sizeof(int),cudaMemcpyDeviceToHost);
    /* Display result */
    printf("Result\n\n");
    for(i=0;i<RES_SIZE;i++){printf("%d ",res[i]);}
    printf("\n");
    /* Free device memory */
    cudaFree(gpu_in);
    cudaFree(gpu_mask);
    cudaFree(gpu_res);
    /* Free host memory (the original leaked `res`) */
    free(input);
    free(mask);
    free(res);
    return 0;
}
|
532
|
#include <stdio.h>
//
// kernel code
//
// Prints the 2-D block and thread coordinates of every launched thread.
// Device printf output is flushed at the next synchronization point.
__global__ void my_first_kernel() {
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    printf("Hello from block (%d, %d), thread (%d, %d)\n", bx, by, tx, ty);
}
//
// host code
//
// Launches a 2x2 grid of 3x3-thread blocks, then resets the device so the
// device-side printf buffer is flushed before exit.
int main(int argc, char **argv) {
    dim3 grid(2, 2);     // blocks per grid
    dim3 block(3, 3);    // threads per block
    my_first_kernel<<<grid,block>>>();
    // CUDA exit -- needed to flush the printf write buffer
    cudaDeviceReset();
    return 0;
}
|
533
|
//Editor: Michael Lukiman
//Izhikevich spiking neuron network implementation in CUDA with added spatial winner-take-all dynamics
//GPU Architecture and Programming - Fall 2018
#include <stdio.h> //Standard input-output
#include <stdlib.h> //StandardLibraryfunctions
#include <iostream> //For streaming input-output operations
#include <math.h> //math
//Network size parameters:
const int excitatory=256;//Number of excitatory neurons (N_e)
const int inhibitory=256;//Number of inhibitory neurons (N_i)
const int total=excitatory+inhibitory;//Total neuron count (exc. + inh.)
const int synapses=100;//Outgoing synapses per neuron
const int delay=20;//Maximum axonal conduction delay, in milliseconds
const int hz=total*120;//Capacity of the spike buffer (upper bound on firing rate)
const float max_weight=10.0;//Maximum synaptic weight
//Izhikevich neuron parameters (per neuron)
float a[total];//Recovery-variable time-scale parameter
float d[total];//After-spike reset increment (set per neuron type below)
//Activity variables
float v[total];//Membrane potential
float u[total];//Membrane recovery variable
int num_fired;//Count of fired neurons
int spike[hz][2];//Spike records, capped at hz entries; two fields per spike
                 //(presumably time and neuron index -- confirm against usage)
//Spike-timing dependent plasticity traces (LTP, LTD)
float LTpot[total][delay];//Long-term potentiation trace
float LTdep[total];//Long-term depression trace
//Connectivity stored as flat data arrays
int ps_set[total][synapses];//Post-synaptic target neuron of each synapse
float weights[total][synapses];//Weight of each synapse
float w_derivs[total][synapses];//Derivative of each synaptic weight
int delays_length[total][delay];//Number of synapses per neuron at each delay
int del_set[total][delay][synapses];//Synapse indices grouped by delay value
int pre_neuron[total];//Count of presynaptic (excitatory) inputs per neuron
int pre_input[total][synapses*3];//Presynaptic source neurons
int pre_delay[total][synapses*3];//Delays of presynaptic connections
float *pre_weights[total][synapses*3];//Pointers into `weights` for presynaptic inputs
float *pre_w_derivs[total][synapses*3];//Pointers into `w_derivs` for presynaptic inputs
// Pseudo-random integer in [0, n): thin modulo wrapper over rand().
int pstochastic(int n) {
    int raw = rand();
    return raw % n;
}
// Builds the network before simulation starts:
//  - per-neuron Izhikevich parameters (RS for excitatory, FS for inhibitory),
//  - random connectivity with no self-synapses and no duplicates
//    (inhibitory cells project only onto excitatory cells),
//  - initial weights (+6 exc, -5 inh) and zeroed derivatives,
//  - delay tables (excitatory delays spread uniformly over [0, delay),
//    all inhibitory synapses at delay 0, i.e. 1 ms),
//  - reverse (presynaptic) lookup tables with pointers into weights/w_derivs,
//  - plasticity traces and initial membrane state (v=-65, u=0.2*v),
//  - a dummy spike at t=-delay so the delivery loop in main() terminates.
void initialize()
{ int i,j,k,jj,dd, exists, r;
for (i=0;i<excitatory;i++) a[i]=0.02;// RS type
for (i=excitatory;i<total;i++) a[i]=0.1; // FS type
for (i=0;i<excitatory;i++) d[i]=8.0; // RS type
for (i=excitatory;i<total;i++) d[i]=2.0; // FS type
for (i=0;i<total;i++) for (j=0;j<synapses;j++)
{
// rejection-sample a post-synaptic target until it is valid
do{
exists = 0; // avoid multiple synapses
if (i<excitatory) r = pstochastic(total);
else r = pstochastic(excitatory);// inh -> exc only
if (r==i) exists=1; // no self-synapses
for (k=0;k<j;k++) if (ps_set[i][k]==r) exists = 1; // synapse already exists
}while (exists == 1);
ps_set[i][j]=r;
}
for (i=0;i<excitatory;i++) for (j=0;j<synapses;j++) weights[i][j]=6.0; // initial exc. synaptic weights
for (i=excitatory;i<total;i++) for (j=0;j<synapses;j++) weights[i][j]=-5.0; // inhibitory synaptic weights
for (i=0;i<total;i++) for (j=0;j<synapses;j++) w_derivs[i][j]=0.0; // synaptic derivatives
for (i=0;i<total;i++)
{
short ind=0;
if (i<excitatory)
{
for (j=0;j<delay;j++)
{ delays_length[i][j]=synapses/delay; // uniform distribution of exc. synaptic delays
for (k=0;k<delays_length[i][j];k++)
del_set[i][j][k]=ind++;
}
}
else
{
for (j=0;j<delay;j++) delays_length[i][j]=0;
delays_length[i][0]=synapses; // all inhibitory delays are 1 ms
for (k=0;k<delays_length[i][0];k++)
del_set[i][0][k]=ind++;
}
}
// Build the presynaptic (reverse) view: for each neuron i, list every
// excitatory neuron j that synapses onto it, the delay of that synapse,
// and pointers to the corresponding weight/derivative cells.
for (i=0;i<total;i++)
{
pre_neuron[i]=0;
for (j=0;j<excitatory;j++)
for (k=0;k<synapses;k++)
if (ps_set[j][k] == i) // find all presynaptic neurons
{
pre_input[i][pre_neuron[i]]=j; // add this neuron to the list
for (dd=0;dd<delay;dd++) // find the delay
for (jj=0;jj<delays_length[j][dd];jj++)
if (ps_set[j][del_set[j][dd][jj]]==i) pre_delay[i][pre_neuron[i]]=dd;
pre_weights[i][pre_neuron[i]]=&weights[j][k]; // pointer to the synaptic weight
pre_w_derivs[i][pre_neuron[i]++]=&w_derivs[j][k];// pointer to the derivative
}
}
// NOTE(review): this writes LTpot[i][delay], i.e. one past the end of a
// [total][delay] declaration — requires LTpot's second dimension to be
// at least 1001+delay (as the simulation loop in main() also assumes).
for (i=0;i<total;i++) for (j=0;j<1+delay;j++) LTpot[i][j]=0.0;
for (i=0;i<total;i++) LTdep[i]=0.0;
for (i=0;i<total;i++) v[i]=-65.0; // initial values for v
for (i=0;i<total;i++) u[i]=0.2*v[i]; // initial values for u
num_fired=1; // spike timings
spike[0][0]=-delay; // put a dummy spike at -delay for simulation efficiency
spike[0][1]=0; // index of the dummy spike
}
// Runs the simulation: 60 one-second epochs of 1000 one-millisecond steps.
// Each step injects random thalamic input, detects spikes, applies STDP
// bookkeeping, delivers delayed synaptic input from recent spikes, and
// integrates the Izhikevich equations with two 0.5 ms half-steps. After
// each second the spike raster is written to spikes.dat, plasticity traces
// and the spike buffer are rolled over, and excitatory weights are updated
// from their accumulated derivatives.
int main()
{
int i, j, k, sec, t;
float input[total];
FILE *fs;
initialize(); // assign connections, weights, etc.
for (sec=0; sec<60; sec++) // simulation of 1 day
{
for (t=0;t<1000;t++) // simulation of 1 sec
{
for (i=0;i<total;i++) input[i] = 0.0; // reset the input
// NOTE(review): total/1000 == 0 here (total == 512), so this loop body
// never executes and no thalamic input is ever injected — the reference
// implementation uses N/1000 with N == 1000; likely meant at least one
// random input per ms. Confirm intended behavior.
for (k=0;k<total/1000;k++)
input[pstochastic(total)]=20.0; // random thalamic input
for (i=0;i<total;i++)
if (v[i]>=30) // did it fire?
{
v[i] = -65.0; // voltage reset
u[i]+=d[i]; // recovery variable reset
// NOTE(review): t+delay reaches 1019; LTpot's second dimension must be
// at least 1001+delay for these writes to stay in bounds.
LTpot[i][t+delay]= 0.1;
LTdep[i]=0.12;
for (j=0;j<pre_neuron[i];j++) *pre_w_derivs[i][j]+=LTpot[pre_input[i][j]][t+delay-pre_delay[i][j]-1];// this spike was after pre-synaptic spikes
spike[num_fired ][0]=t;
spike[num_fired++][1]=i;
// NOTE(review): "Two many" typo in the runtime message left as-is here.
if (num_fired == hz) {std::cout << "Two many spikes at t=" << t << " (ignoring all)";num_fired=1;}
}
// Deliver input from every spike still within the delay window, walking
// the spike buffer backwards until the dummy spike at -delay stops us.
k=num_fired;
while (t-spike[--k][0] <delay)
{
for (j=0; j< delays_length[spike[k][1]][t-spike[k][0]]; j++)
{
i=ps_set[spike[k][1]][del_set[spike[k][1]][t-spike[k][0]][j]];
input[i]+=weights[spike[k][1]][del_set[spike[k][1]][t-spike[k][0]][j]];
if (spike[k][1] <excitatory) // this spike is before postsynaptic spikes
w_derivs[spike[k][1]][del_set[spike[k][1]][t-spike[k][0]][j]]-=LTdep[i];
}
}
for (i=0;i<total;i++)
{
v[i]+=0.5*((0.04*v[i]+5)*v[i]+140-u[i]+input[i]); // for numerical stability
v[i]+=0.5*((0.04*v[i]+5)*v[i]+140-u[i]+input[i]); // time step is 0.5 ms
u[i]+=a[i]*(0.2*v[i]-u[i]);
LTpot[i][t+delay+1]=0.95*LTpot[i][t+delay]; // decay the LTP trace
LTdep[i]*=0.95; // decay the LTD trace
}
}
std::cout << "sec=" << sec << ", firing rate=" << float(num_fired)/total << "\n";
// NOTE(review): "w+" truncates spikes.dat every second, so only the last
// second's raster survives — confirm whether append was intended.
fs = fopen("spikes.dat","w+");
printf("%d", num_fired);
for (i=1;i<num_fired;i++)
if (spike[i][0] >=0)
// NOTE(review): the literal "fffff" after each record looks like stray
// text in the output format string — verify the intended file format.
fprintf(fs, "%d %d\nfffff", spike[i][0], spike[i][1]);
fclose(fs);
// Roll the tail of the LTP traces back to the start of the next second.
for (i=0;i<total;i++) // prepare for the next sec
for (j=0;j<delay+1;j++)
LTpot[i][j]=LTpot[i][1000+j];
// Keep only spikes still inside the delay window, shifted by -1000 ms.
k=num_fired-1;
while (1000-spike[k][0]<delay) k--;
for (i=1;i<num_fired-k;i++)
{
spike[i][0]=spike[k+i][0]-1000;
spike[i][1]=spike[k+i][1];
}
num_fired = num_fired-k;
for (i=0;i<excitatory;i++) // modify only exc connections
for (j=0;j<synapses;j++)
{
weights[i][j]+=0.01+w_derivs[i][j]; // STDP update with small constant drift
w_derivs[i][j]*=0.9; // decay the derivative
if (weights[i][j]>max_weight) weights[i][j]=max_weight; // clip to [0, max_weight]
if (weights[i][j]<0) weights[i][j]=0.0;
}
}
}
|
534
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o linearcuda_raj linear_raj_cuda.cu -lm
*
* To run:
* ./linearcuda_raj
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t{
double x;
double y;
}point_t;
int n_data = 1000; // number of points in data[] (host-side copy)
__device__ int d_n_data =1000; // device-side mirror of n_data, readable from kernels
point_t data[] = {
{69.09,102.61},{82.13,152.08},{73.94,133.41},{67.94,130.43},
{83.26,134.77},{83.05,140.13},{79.17,149.94},{78.38,151.22},
{83.10,152.31},{73.50,140.04},{75.41,129.24},{87.18,142.30},
{65.12,132.63},{49.91,98.86},{ 5.65,45.64},{67.40,142.60},
{ 4.96,40.62},{45.49,89.81},{50.42,104.86},{89.77,164.46},
{18.97,51.92},{68.36,128.98},{ 5.51,20.69},{23.82,73.75},
{66.69,139.56},{93.58,169.37},{66.54,128.90},{13.22,50.82},
{46.69,84.63},{60.31,120.24},{61.09,105.13},{40.11,97.65},
{49.86,96.39},{53.57,110.10},{ 6.19,47.58},{12.38,64.19},
{78.14,141.00},{27.83,61.09},{41.00,76.48},{61.04,120.42},
{24.78,68.13},{52.50,107.45},{14.94,69.36},{35.97,79.09},
{40.30,73.31},{92.79,182.68},{ 2.26,38.15},{33.80,70.49},
{65.87,121.84},{45.00,78.95},{83.62,159.92},{35.90,85.48},
{ 6.00,51.97},{59.55,110.64},{18.14,49.26},{46.19,75.07},
{42.78,103.67},{21.68,66.82},{64.63,146.59},{96.75,168.65},
{20.19,44.40},{66.85,143.42},{61.71,116.91},{74.56,133.91},
{98.14,166.53},{91.82,154.60},{ 6.83,27.37},{91.03,163.50},
{41.90,92.72},{65.35,121.86},{59.70,116.10},{70.07,122.91},
{97.62,173.25},{21.01,55.66},{17.16,47.12},{71.91,131.35},
{ 0.31,17.13},{33.27,56.88},{69.57,128.78},{66.31,121.39},
{81.24,144.19},{90.57,176.58},{77.25,142.55},{94.31,162.47},
{42.88,84.68},{64.35,111.81},{ 2.18,26.05},{84.89,139.55},
{ 3.69,42.18},{46.86,111.42},{91.62,162.75},{48.86,103.04},
{11.69,49.69},{94.38,180.77},{35.87,68.86},{13.57,53.53},
{37.92,93.43},{53.11,100.27},{ 9.44,56.21},{66.37,119.22},
{96.89,143.15},{79.79,151.46},{29.43,104.35},{97.53,166.99},
{18.39,75.76},{41.46,88.34},{28.20,71.24},{15.77,48.03},
{23.44,72.10},{79.20,145.25},{ 6.38,37.64},{20.94,61.35},
{31.44,73.70},{58.22,111.84},{35.73,93.21},{54.87,115.86},
{92.84,161.21},{57.35,111.33},{48.13,104.70},{39.15,84.25},
{76.73,131.17},{33.16,86.42},{79.30,152.22},{32.79,85.88},
{61.08,124.63},{33.65,86.36},{30.30,67.58},{25.36,55.42},
{60.60,132.22},{54.55,107.11},{61.55,120.12},{21.43,51.10},
{55.28,114.45},{69.39,146.15},{71.36,136.26},{78.32,158.06},
{53.78,104.73},{41.16,88.31},{58.21,105.38},{22.62,48.68},
{57.70,107.41},{87.20,160.88},{53.56,112.60},{45.79,103.69},
{87.23,172.35},{26.91,66.80},{90.24,146.45},{10.52,51.41},
{88.18,177.14},{89.88,146.67},{ 3.40,44.19},{63.10,118.63},
{72.20,127.43},{ 7.21,44.09},{68.95,116.40},{93.06,141.28},
{ 1.78,47.44},{21.78,58.19},{95.38,167.88},{26.07,69.08},
{82.38,144.48},{44.78,87.81},{ 5.45,47.63},{14.61,46.51},
{47.12,97.36},{91.40,146.00},{ 3.92,31.27},{30.83,54.33},
{51.07,118.44},{11.70,45.32},{37.06,76.74},{93.60,154.41},
{88.67,158.77},{58.76,102.75},{18.18,71.37},{61.55,96.09},
{64.81,126.94},{38.89,80.15},{40.33,89.31},{ 6.98,42.89},
{26.84,77.22},{ 1.48,38.03},{69.64,125.26},{60.44,124.70},
{44.32,103.70},{23.96,61.48},{41.32,66.25},{76.06,136.13},
{74.79,139.10},{48.14,86.10},{90.45,175.78},{87.86,133.98},
{92.55,170.68},{58.06,111.65},{79.93,134.77},{61.20,126.32},
{99.31,173.71},{47.55,96.83},{ 5.95,58.93},{54.41,117.09},
{49.62,95.15},{20.48,52.60},{26.69,64.39},{77.38,138.43},
{21.50,61.58},{37.20,100.88},{26.12,60.30},{45.04,101.03},
{12.08,59.04},{13.59,46.57},{97.04,166.44},{58.34,109.75},
{ 4.47,41.91},{ 4.82,26.79},{49.25,120.66},{47.96,92.87},
{94.68,160.36},{38.52,78.12},{18.86,67.64},{65.01,128.12},
{94.62,168.74},{46.49,100.40},{18.67,59.96},{61.25,126.12},
{26.30,63.87},{16.32,75.77},{47.98,83.09},{97.67,167.17},
{90.04,155.34},{61.05,119.81},{34.12,79.69},{36.37,81.55},
{67.37,147.11},{94.03,166.73},{27.01,54.22},{14.88,38.59},
{53.43,101.46},{44.85,102.66},{96.98,172.50},{29.25,65.32},
{20.28,82.30},{82.38,148.74},{47.99,95.71},{54.39,113.38},
{24.78,60.98},{46.40,112.11},{83.11,154.36},{78.56,142.47},
{52.44,120.30},{85.45,139.05},{18.44,55.81},{33.82,91.05},
{81.25,148.47},{28.27,71.42},{28.72,81.91},{43.69,113.37},
{95.79,176.73},{20.00,66.36},{ 1.90,51.15},{69.08,102.50},
{66.37,126.22},{75.30,137.17},{50.89,107.35},{26.57,71.46},
{96.71,173.85},{61.50,136.37},{24.52,55.79},{ 3.31,25.80},
{58.29,121.62},{38.72,73.27},{58.48,109.58},{79.94,143.31},
{45.13,95.27},{14.01,41.17},{63.65,117.17},{73.39,150.88},
{57.61,118.65},{68.16,125.50},{78.05,134.05},{84.45,140.93},
{64.23,127.52},{51.91,105.59},{52.26,103.94},{48.43,74.06},
{45.19,95.99},{16.62,66.21},{34.10,70.76},{46.25,96.86},
{65.54,128.94},{73.65,134.18},{87.08,161.45},{45.86,101.31},
{99.65,155.49},{62.47,116.88},{60.77,123.30},{10.00,73.95},
{69.31,138.95},{99.81,190.04},{18.80,61.57},{11.29,37.86},
{32.22,61.46},{83.93,165.72},{23.79,61.15},{61.98,134.84},
{36.38,81.78},{91.53,162.22},{17.14,42.61},{31.77,81.24},
{41.24,88.26},{53.74,120.40},{ 7.59,46.01},{69.20,145.90},
{31.57,76.95},{41.83,103.94},{76.31,145.60},{34.57,86.48},
{78.44,140.01},{35.91,83.76},{67.19,122.40},{28.51,69.92},
{41.12,109.23},{38.78,89.35},{87.32,160.07},{81.17,141.79},
{27.93,80.99},{43.71,85.72},{35.55,76.40},{48.41,96.15},
{35.86,93.18},{58.45,103.95},{32.51,76.14},{22.98,47.63},
{84.80,150.30},{19.37,67.26},{62.21,124.12},{72.56,125.75},
{12.62,44.66},{84.63,150.02},{35.79,77.18},{91.59,167.42},
{56.79,135.96},{60.75,128.75},{75.17,132.66},{50.03,97.47},
{22.77,81.91},{79.14,140.88},{30.32,89.46},{64.27,117.96},
{14.61,72.92},{13.17,65.89},{73.53,123.85},{77.44,118.69},
{56.04,107.10},{46.18,92.11},{54.48,94.07},{73.79,151.52},
{ 8.83,39.47},{ 3.58,40.64},{33.35,83.41},{63.51,127.53},
{63.18,134.55},{38.65,77.96},{81.77,143.31},{28.99,71.78},
{46.57,107.91},{31.88,61.51},{69.75,121.75},{39.03,92.38},
{78.95,138.81},{33.03,62.89},{93.32,160.29},{51.73,96.94},
{99.52,178.80},{39.66,81.62},{20.81,63.06},{54.52,101.68},
{13.88,77.63},{11.54,47.90},{57.06,111.62},{34.85,73.47},
{ 1.03,38.38},{99.98,147.72},{15.65,37.52},{93.98,171.57},
{26.00,61.54},{26.34,97.49},{44.45,89.46},{30.19,85.94},
{65.17,121.23},{96.37,174.62},{17.59,57.65},{49.21,85.15},
{97.14,164.23},{69.46,128.85},{52.29,116.47},{59.90,97.23},
{41.62,96.65},{46.43,96.75},{64.79,120.04},{16.39,42.84},
{96.74,160.51},{15.06,67.38},{12.89,32.01},{59.36,96.22},
{36.49,85.66},{ 6.12,33.73},{87.56,144.59},{58.04,116.48},
{45.12,109.10},{90.29,155.38},{15.50,58.50},{ 8.17,39.33},
{36.71,87.05},{95.02,175.91},{56.45,102.69},{29.55,76.64},
{81.35,144.65},{51.25,106.51},{47.80,101.57},{39.43,89.38},
{16.54,75.06},{18.23,39.97},{38.47,84.60},{72.95,137.89},
{20.95,63.99},{53.89,104.26},{62.01,111.63},{77.09,132.07},
{75.62,131.00},{82.52,143.40},{29.78,67.90},{24.28,65.09},
{60.18,116.00},{64.87,116.99},{66.81,118.50},{97.63,173.78},
{82.52,145.14},{17.04,56.60},{69.23,128.14},{11.14,35.51},
{44.81,75.59},{79.91,130.95},{73.98,123.15},{30.70,68.34},
{16.56,69.96},{44.54,111.55},{42.63,98.01},{ 8.33,52.94},
{23.70,81.71},{72.51,126.68},{51.65,91.93},{18.86,52.82},
{51.40,105.16},{33.69,78.93},{48.25,95.34},{95.60,165.19},
{70.55,135.12},{ 7.54,25.40},{84.10,147.82},{ 5.46,55.62},
{51.25,105.36},{33.31,70.29},{91.09,155.95},{93.79,150.87},
{39.64,105.11},{23.94,70.09},{ 2.79,57.46},{93.12,157.26},
{82.66,153.35},{20.67,50.18},{25.48,55.36},{73.93,125.80},
{82.75,164.55},{66.85,115.72},{ 6.63,57.24},{56.61,128.98},
{73.65,146.00},{28.61,76.79},{80.74,154.19},{42.26,82.19},
{48.99,96.21},{29.42,74.92},{47.75,96.08},{30.98,74.08},
{21.47,58.18},{40.25,84.61},{71.79,134.58},{62.21,100.02},
{ 6.50,44.15},{23.18,54.16},{99.44,173.73},{34.55,74.88},
{44.42,90.87},{94.40,160.68},{44.12,83.50},{38.81,83.14},
{17.89,48.98},{41.90,89.86},{53.95,124.31},{34.65,77.79},
{99.46,188.27},{45.96,96.10},{56.07,108.36},{52.48,94.99},
{51.23,105.75},{13.28,48.42},{81.65,142.71},{27.37,57.49},
{47.88,98.01},{45.77,83.07},{67.87,113.75},{ 6.56,20.13},
{57.51,99.93},{45.05,98.83},{86.15,154.73},{32.01,66.76},
{10.76,37.49},{15.01,59.42},{56.76,111.61},{34.09,78.70},
{88.70,179.11},{50.88,120.11},{22.56,51.27},{77.01,143.66},
{31.56,69.66},{45.50,100.94},{64.12,114.38},{86.51,162.80},
{91.85,153.99},{89.59,139.43},{62.84,120.04},{ 1.00,45.36},
{76.04,134.88},{ 2.31,39.43},{ 4.08,36.49},{50.56,91.92},
{80.18,165.25},{84.88,150.80},{85.96,154.04},{58.01,121.88},
{83.62,142.62},{78.78,133.92},{24.57,47.24},{10.38,51.07},
{70.57,129.78},{ 6.24,63.42},{95.52,158.82},{47.53,99.59},
{37.80,86.23},{57.04,125.93},{98.17,189.61},{18.58,56.15},
{43.64,95.36},{97.10,155.81},{41.32,104.04},{58.80,110.09},
{46.92,109.08},{55.66,115.05},{43.44,91.67},{ 1.16,18.39},
{44.90,79.00},{88.95,146.71},{77.11,140.55},{38.22,70.52},
{33.63,94.80},{82.24,147.90},{32.39,76.09},{68.71,137.44},
{61.91,139.84},{ 2.72,31.93},{31.01,83.44},{43.82,82.06},
{90.41,138.37},{74.29,146.66},{34.46,82.13},{27.66,70.23},
{ 0.75,35.40},{70.47,122.31},{38.41,74.27},{59.11,111.32},
{94.20,145.15},{61.79,112.71},{25.69,72.79},{17.38,73.54},
{ 2.57,27.14},{40.88,85.46},{59.63,106.14},{23.24,43.26},
{24.06,73.14},{ 3.38,46.52},{24.26,58.40},{93.00,155.20},
{48.69,81.21},{32.00,80.68},{67.43,124.66},{76.88,118.26},
{97.26,169.42},{52.05,86.99},{26.68,80.02},{88.80,164.27},
{30.79,92.40},{13.76,46.00},{ 4.39,20.46},{47.45,110.48},
{22.42,68.09},{39.34,105.98},{90.58,167.10},{45.22,80.64},
{ 2.29,24.47},{51.00,101.65},{16.63,52.64},{43.24,91.93},
{33.47,77.44},{69.44,121.59},{93.63,154.70},{97.34,176.12},
{82.83,145.39},{84.31,123.04},{51.96,110.56},{13.79,49.08},
{85.09,147.33},{19.50,64.58},{25.47,81.74},{51.68,106.78},
{27.99,75.05},{ 2.78,60.03},{93.13,166.31},{27.66,83.80},
{43.98,86.41},{ 6.34,27.97},{41.25,74.57},{10.49,37.94},
{94.29,159.02},{33.41,82.45},{19.80,64.16},{18.35,65.02},
{93.57,182.68},{ 7.90,51.96},{85.56,157.84},{50.76,88.32},
{65.70,121.81},{72.32,151.58},{23.45,66.39},{ 4.44,33.36},
{58.98,115.21},{12.18,46.07},{30.66,60.74},{69.63,124.96},
{ 2.69,37.64},{16.96,57.25},{60.58,126.99},{73.60,135.37},
{11.98,68.00},{ 0.42,38.89},{47.45,90.83},{44.59,89.17},
{ 3.95,27.62},{78.31,145.77},{81.91,153.36},{47.00,109.88},
{ 3.10,31.96},{59.53,105.57},{14.67,55.59},{30.44,90.14},
{64.61,123.88},{40.33,96.71},{67.44,133.61},{42.29,68.72},
{44.38,90.29},{65.86,136.25},{91.75,157.96},{24.71,74.32},
{89.50,153.96},{95.40,160.30},{ 2.51,32.12},{51.63,109.59},
{41.35,101.13},{19.94,59.70},{65.45,132.43},{38.21,117.97},
{40.69,84.45},{50.78,126.50},{47.42,103.31},{ 6.98,47.56},
{95.49,162.77},{62.64,129.54},{45.89,106.75},{71.43,126.03},
{95.26,169.84},{81.30,128.51},{16.16,48.62},{ 8.97,75.32},
{28.86,95.51},{10.27,41.50},{78.84,153.22},{83.90,173.33},
{51.33,105.65},{36.43,88.93},{70.23,131.60},{23.96,67.38},
{45.21,97.59},{76.50,118.70},{63.22,105.01},{33.75,102.37},
{72.57,140.64},{18.16,68.62},{27.39,71.28},{ 3.22,51.94},
{89.94,151.77},{23.53,59.07},{18.69,44.83},{25.87,75.41},
{90.76,147.03},{39.84,92.09},{89.20,136.14},{ 1.70,38.68},
{32.49,69.34},{ 6.78,38.27},{32.36,78.73},{57.57,128.00},
{81.11,147.45},{93.22,161.07},{75.48,137.02},{70.72,129.74},
{33.18,80.42},{16.09,52.35},{12.92,42.89},{14.63,47.45},
{16.33,60.65},{26.52,60.33},{65.75,144.10},{60.94,100.72},
{28.23,76.55},{20.77,77.79},{46.35,83.09},{85.82,151.43},
{51.71,91.53},{42.60,70.64},{ 4.22,48.90},{61.25,93.32},
{14.76,46.45},{44.55,83.34},{57.30,106.17},{66.70,130.32},
{47.19,113.56},{14.60,19.99},{29.04,67.68},{72.16,136.94},
{24.30,60.44},{44.74,96.11},{58.89,105.52},{53.13,107.86},
{67.93,143.40},{60.48,113.91},{47.24,104.36},{58.88,114.66},
{80.90,148.45},{58.37,113.83},{89.03,138.68},{ 8.12,41.01},
{24.14,68.27},{37.59,74.19},{44.67,83.16},{ 5.31,35.62},
{57.88,103.64},{76.66,135.69},{47.92,97.61},{34.11,71.58},
{73.72,133.44},{67.16,123.69},{ 2.70,45.81},{13.06,47.63},
{70.19,132.68},{40.77,85.91},{68.57,122.66},{83.35,147.40},
{95.10,159.26},{76.55,140.12},{44.42,92.71},{77.80,141.02},
{18.03,54.85},{16.33,44.89},{54.49,102.97},{88.63,149.21},
{53.20,104.65},{68.60,128.48},{13.34,52.97},{17.39,48.69},
{95.50,153.55},{75.90,130.21},{93.37,171.51},{59.28,117.72},
{22.33,72.20},{31.01,81.27},{16.35,50.69},{ 9.23,54.49},
{86.04,158.82},{46.01,99.38},{32.61,47.24},{14.21,40.26},
{37.28,76.21},{60.05,106.09},{94.82,165.99},{98.03,169.17},
{ 8.14,37.52},{29.47,58.43},{91.59,180.13},{64.46,123.22},
{81.92,164.97},{40.79,93.04},{98.66,170.59},{85.47,151.64},
{13.01,51.89},{69.39,134.86},{77.60,128.60},{89.83,157.49},
{22.48,52.09},{61.31,118.09},{39.29,91.09},{57.26,80.48},
{62.54,129.76},{30.22,81.60},{21.56,60.61},{65.89,99.43},
{78.90,151.00},{93.57,158.74},{38.70,76.63},{ 8.25,47.63},
{91.94,169.65},{54.05,110.82},{98.12,177.20},{17.59,55.03},
{74.27,140.33},{58.40,122.38},{ 6.27,35.39},{90.31,164.73},
{87.59,150.48},{47.17,97.51},{71.25,127.19},{56.58,106.56},
{52.14,108.51},{24.20,80.20},{30.66,79.03},{59.72,116.04},
{60.16,123.78},{22.09,68.92},{45.29,111.19},{92.16,156.21},
{45.05,113.83},{81.73,145.26},{34.92,85.83},{13.92,67.65},
{24.14,70.45},{81.85,153.42},{41.19,85.93},{ 0.67,39.26},
{36.09,90.68},{19.54,62.25},{33.38,74.35},{10.19,55.59},
{59.29,93.03},{11.58,62.19},{87.30,152.51},{25.43,59.13},
{74.35,134.81},{65.23,139.29},{32.23,83.27},{70.94,130.63},
{70.57,128.03},{15.13,65.98},{88.17,138.65},{ 9.49,43.25},
{68.37,120.54},{74.69,143.68},{50.10,119.13},{ 8.76,32.56},
{37.70,101.07},{ 4.04,42.48},{29.84,97.01},{69.88,130.23},
{ 8.65,45.47},{66.47,118.24},{12.35,55.24},{20.41,64.50},
{91.28,166.16},{91.82,165.77},{10.65,52.39},{79.95,147.31},
{16.06,57.21},{71.00,131.19},{91.27,155.74},{28.57,60.74},
{31.63,92.00},{15.84,50.24},{80.22,135.00},{51.52,96.40},
{67.42,117.71},{15.46,65.55},{85.71,151.69},{99.32,165.22},
{28.38,107.61},{75.32,129.56},{96.48,161.61},{36.18,95.12},
{34.18,71.39},{ 7.77,40.69},{37.98,65.63},{68.95,131.41},
{34.08,78.15},{29.70,84.37},{92.61,178.05},{82.22,128.06},
{ 1.32,32.39},{ 2.57,41.54},{72.80,142.40},{19.88,47.67},
{95.42,167.29},{ 7.56,63.21},{90.52,146.47},{67.80,115.47},
{89.41,162.67},{87.72,165.63},{71.28,138.33},{25.08,70.34},
{41.57,86.86},{51.45,104.07},{61.54,121.29},{32.53,93.45},
{ 3.26,29.72},{74.50,137.34},{45.07,102.67},{67.55,124.04},
{38.14,88.93},{ 2.46,37.56},{21.27,66.20},{32.26,67.66},
{36.05,64.93},{78.82,147.50},{ 7.26,40.83},{86.28,157.01},
{49.02,95.59},{80.36,136.69},{ 2.94,46.23},{50.55,102.49},
{28.35,69.73},{19.99,77.88},{22.42,72.52},{15.98,58.85},
{68.79,130.10},{23.15,58.87},{ 3.27,40.79},{79.02,150.83},
{37.04,72.00},{45.11,79.16},{30.61,58.70},{74.69,133.55},
{25.47,71.56},{89.34,157.40},{61.39,125.26},{ 1.52,31.44},
{78.37,136.81},{48.33,123.74},{71.58,126.89},{ 5.20,49.01},
{90.20,142.37},{94.03,153.03},{97.27,167.60},{91.90,156.09},
{56.45,131.39},{59.09,118.83},{61.75,115.24},{22.31,75.23},
{73.98,142.59},{24.38,61.52},{73.46,110.70},{27.35,85.50},
{55.66,115.25},{67.56,119.99},{61.81,130.15},{12.05,48.88},
{84.16,146.44},{48.33,99.47},{83.37,138.40},{23.51,81.53},
{85.14,138.65},{92.12,176.32},{53.13,115.81},{90.59,162.45},
{92.52,182.64},{76.45,149.04},{48.40,96.60},{ 8.70,45.67},
{96.13,174.15},{21.67,43.90},{15.13,43.97},{73.66,137.40},
{80.31,136.39},{79.59,139.56},{55.33,112.94},{27.38,47.87}
};
// Squared vertical distance of the point (r, a) from the line y = m*x + c
// (host-side reference version).
double residual_error(double r, double a, double m, double c) {
    double predicted = m * r + c;
    double diff = predicted - a;
    return diff * diff;
}
// Device twin of residual_error(): squared residual of (r, a) vs y = m*x + c.
__device__ double d_residual_error(double r, double a, double m, double c) {
    double diff = (m * r + c) - a;
    return diff * diff;
}
// Root-mean-square residual error of the line (m, c) over the global
// data[] array of n_data points (CPU reference for the GPU path).
double rms_error(double m, double c) {
    double sum_sq = 0.0;
    for (int idx = 0; idx < n_data; idx++) {
        sum_sq += residual_error(data[idx].x, data[idx].y, m, c);
    }
    return sqrt(sum_sq / n_data);
}
// Kernel: one thread per data point. Thread i stores the squared residual
// of point i against the line (*m, *c) into error_sum_arr[i]; the host sums
// the array and takes sqrt(mean) to finish the RMS computation.
// Fix: guard against launches with more threads than d_n_data points so
// out-of-range threads do not read/write past the buffers.
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < d_n_data)
        error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
// Computes finish - start in nanoseconds, storing the result in *difference.
// Returns 0 when the difference is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int sec = finish->tv_sec - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0) {
        // borrow one second when the nanosecond field wraps
        sec -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return (*difference > 0) ? 0 : 1;
}
// Gradient search for the minimum-RMS line y = m*x + c. From the base
// estimate (bm, bc) we evaluate 8 neighbouring estimates one `step` away
// in each direction of m-c space (per-point squared residuals computed on
// the GPU, one thread per data point), move to the best neighbour, and
// stop when no neighbour improves on the base.
//
// Fixes vs the previous version:
//  - error_sum_total was read before ever being initialized (UB);
//  - a duplicated best-error block corrupted the per-candidate error;
//  - best_error persisted across passes, so a stale best_error_i could
//    be used once the search plateaued. It is now reset each pass.
int main(){
  int i;
  double bm = 1.3;          // base gradient (m)
  double bc = 10;           // base intercept (c)
  double be;                // base RMS error
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  int best_error_i;
  int minimum_found = 0;
  // offsets of the 8 neighbouring estimates in (m, c) space
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  // The data set never changes, so one upload is enough.
  error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
  if(error){
    fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
      cudaGetErrorString(error));
  }
  while(!minimum_found) {
    double best_error = be;   // per-pass best; only a strict improvement counts
    best_error_i = -1;
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    error = cudaMemcpy(d_dm, dm, (sizeof(double)*8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error,
        cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double)*8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error,
        cudaGetErrorString(error));
    }
    for(i=0;i<8;i++){
      double h_error_sum_arr[1000];
      double error_sum_total = 0.0;   // fix: was uninitialized
      // 100 blocks x 10 threads = 1000 threads, one per data point
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      cudaDeviceSynchronize();
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000),
          cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
          cudaGetErrorString(error));
      }
      for(int j=0;j<n_data;j++){
        error_sum_total += h_error_sum_arr[j];
      }
      e[i] = sqrt(error_sum_total / n_data);
      if(e[i] < best_error){
        best_error = e[i];
        best_error_i = i;
      }
    }
    if(best_error_i >= 0 && best_error < be){
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
      cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));
  return 0;
}
;
|
535
|
//xfail:TIMEOUT
//--blockDim=32 --gridDim=64 --no-inline
#include "cuda.h"
#define N 32
// Verification test kernel (marked xfail:TIMEOUT above): every thread in a
// block writes the entire shared array x through an unsigned int alias, so
// threads race on the same shared locations — the race is the point of the
// test and must not be "fixed". Parameter p is unused.
__global__ void foo(int* p) {
__shared__ unsigned char x[N];
for (unsigned int i=0; i<(N/4); i++) {
((unsigned int *)x)[i] = threadIdx.x;
}
}
|
536
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
cudaError_t backPropagation(double *out, double *x,
double *y, double *W, unsigned int row, unsigned int column, double eta);
// Element-wise out[i] -= y[i] for i < row; one thread per element,
// excess threads do nothing.
__global__ void subtractKernel(double *out, double *y, unsigned int row)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < row) {
		out[idx] -= y[idx];
	}
}
// Gradient step on the row-major weight matrix W (row x column):
// W[r][c] -= eta * out[r] * x[c]. One thread per matrix element,
// excess threads do nothing.
__global__ void updateWKernel(double *out, double *x, double *W,
	unsigned int row, unsigned int column, double eta)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < row * column) {
		int r = idx / column;   // row index into W
		int c = idx % column;   // column index into W
		W[idx] -= eta * out[r] * x[c];
	}
}
// Driver: builds row x column test matrices filled with constants, runs one
// GPU backpropagation step, and prints the updated weight matrix.
// Usage: prog <row> <column> <eta>
int main(int argc, char *argv[])
{
	// Fix: validate argc before dereferencing argv[1..3].
	if (argc < 4) {
		fprintf(stderr, "usage: %s <row> <column> <eta>\n", argv[0]);
		return 1;
	}
	unsigned int row = atoi(argv[1]);
	unsigned int column = atoi(argv[2]);
	double eta = atof(argv[3]);
	double *W = (double*)malloc(row * column * sizeof(double));
	double *x = (double*)malloc(column * sizeof(double));
	double *y = (double*)malloc(row * sizeof(double));
	double *out = (double*)malloc(row * sizeof(double));
	for (unsigned int i = 0; i < column; i++) {
		x[i] = 10;
	}
	for (unsigned int i = 0; i < row * column; i++) {
		W[i] = 10;
	}
	// Fix: y and out each hold `row` elements; the previous loops were
	// bounded by `column`, leaving elements uninitialized (column < row)
	// or overflowing the heap (column > row).
	for (unsigned int i = 0; i < row; i++) {
		y[i] = 10;
	}
	for (unsigned int i = 0; i < row; i++) {
		out[i] = 11;
	}
	// Run one backpropagation step on the GPU.
	cudaError_t cudaStatus = backPropagation(out, x, y, W, row, column, eta);
	for (unsigned int i = 0; i < row; i++) {
		for (unsigned int j = 0; j < column; j++) {
			printf("%.2f ", W[i * column + j]);
		}
		printf("\n");
	}
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}
	// Fix: release host buffers (previously leaked).
	free(W);
	free(x);
	free(y);
	free(out);
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper: one backpropagation step on the GPU.
// Computes delta = out - y (length row), then W -= eta * delta * x^T
// (row x column, row-major), copying the updated W back to the host.
// Returns the last CUDA status observed; device buffers are always freed.
cudaError_t backPropagation(double *out, double *x,
	double *y, double *W, unsigned int row, unsigned int column, double eta)
{
	double *dev_out = 0;
	double *dev_x = 0;
	double *dev_y = 0;
	double *dev_W = 0;
	cudaError_t cudaStatus;
	// Fix: check each allocation instead of silently overwriting the status.
	cudaStatus = cudaMalloc((void**)&dev_out, row * sizeof(double));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc on dev_out failed!\n"); goto Error; }
	cudaStatus = cudaMalloc((void**)&dev_x, column * sizeof(double));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc on dev_x failed!\n"); goto Error; }
	cudaStatus = cudaMalloc((void**)&dev_y, row * sizeof(double));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc on dev_y failed!\n"); goto Error; }
	cudaStatus = cudaMalloc((void**)&dev_W, row * column * sizeof(double));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc on dev_W failed!\n"); goto Error; }
	// Copy input vectors from host memory to GPU buffers.
	cudaMemcpy(dev_out, out, row * sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_x, x, column * sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_y, y, row * sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_W, W, row * column * sizeof(double), cudaMemcpyHostToDevice);
	// Compute (out - y), the differential of cost on predictions.
	// Fix: the grid must cover `row` elements; it was previously sized by
	// `column`, leaving elements unprocessed whenever row > column.
	subtractKernel<<<row / 512 + 1, 512>>>(dev_out, dev_y, row);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching subtractKernel!\n", cudaStatus);
		goto Error;
	}
	// Compute the differential of cost on weights and update: W = W - eta*(delta*(x)T)
	updateWKernel<<<row * column / 512 + 1, 512>>>(dev_out, dev_x, dev_W, row, column, eta);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching updateWKernel!\n", cudaStatus);
		goto Error;
	}
	// Copy the updated weights back to host memory.
	cudaStatus = cudaMemcpy(W, dev_W, row * column * sizeof(double), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
Error:
	cudaFree(dev_out);
	cudaFree(dev_x);
	cudaFree(dev_y);
	cudaFree(dev_W);
	return cudaStatus;
}
|
537
|
#include <stdio.h>
#include <iostream>
// Device-side greeting. Uses printf because std::cout is host-only and
// cannot be called from device code.
__global__ void helloFromGPU()
{
    printf("Hello world from GPU using C++\n");
}
// Prints a greeting from the host, then launches 10 device threads that
// each print a greeting from the GPU.
int main(int argc, char const* argv[])
{
    std::cout << "Hello world from cpu using C++" << std::endl;
    helloFromGPU <<<1, 10>>>();
    // Fix: without a synchronize the process can exit before the device
    // printf buffer is flushed, so the GPU output may never appear.
    cudaDeviceSynchronize();
    return 0;
}
|
538
|
/*
By: Carrick McClain
Sources:
http://csweb.cs.wfu.edu
https://stackoverflow.com
http://www.cplusplus.com
https://devtalk.nvidia.com
https://docs.nvidia.com/cuda/cuda-c-programming-guide
*/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
// Reports a CUDA error with its source location; exits when abort is
// non-zero (the default). Does nothing on cudaSuccess.
inline void gpu_handle_error( cudaError_t err, const char* file, int line, int abort = 1 )
{
	if (err == cudaSuccess)
		return;
	fprintf (stderr, "gpu error: %s, %s, %d\n", cudaGetErrorString (err), file, line);
	if (abort)
		exit (EXIT_FAILURE);
}
#define gpu_err_chk(e) {gpu_handle_error( e, __FILE__, __LINE__ );}
/*
Integral Functions
You can replace any invoked math function with another.
To test this, you can replace the function calls in the
trapezoidal functions (host & device) with any of the others below.
I tried to implement these functions with functors, but they didn't work
as expected with device code. */
// f(x) = 1 / (1 + x^2)  (host version)
float func_1a( float input )
{
	float denom = 1 + input * input;
	return 1 / denom;
}
// f(x) = 1 / (1 + x^2)  (device version, mirrors func_1a)
__device__ float func_1b( float input )
{
	float denom = 1 + input * input;
	return 1 / denom;
}
// function 2 (host & gpu versions)
// f(x) = 1*x^2 + 3*x^2 + 5  (host version; evaluated in double like the
// original literals, then narrowed to float on return)
float func_2a( float input )
{
	double sq = 1.0 * input * input;
	return sq + 3.0 * input * input + 5.0;
}
// f(x) = 1*x^2 + 3*x^2 + 5  (device version, mirrors func_2a)
__device__ float func_2b( float input )
{
	double sq = 1.0 * input * input;
	return sq + 3.0 * input * input + 5.0;
}
//function 3 (host & gpu versions)
// f(x) = 2x^3 / 5x^2  (host version)
// NOTE(review): undefined at input == 0 (0/0) — confirm callers avoid it.
float func_3a( float input )
{
	double numerator = 2.0 * input * input * input;
	double denominator = 5.0 * input * input;
	return numerator / denominator;
}
// f(x) = 2x^3 / 5x^2  (device version, mirrors func_3a; undefined at 0)
__device__ float func_3b( float input )
{
	double numerator = 2.0 * input * input * input;
	double denominator = 5.0 * input * input;
	return numerator / denominator;
}
// Serial trapezoidal rule function.
// Change around the commented lines to run it with other math functions.
// Serial composite trapezoidal rule for func_1a over [a, b] with n
// subintervals: (delta/2) * (f(a) + f(b) + 2*sum of interior points).
// Swap the func_1a calls for func_2a/func_3a to integrate other functions.
float trapezoidal( float a, float b, float n )
{
	float width = (b-a)/n;
	float acc = func_1a(a) + func_1a(b);
	for( int k = 1; k < n; k++ )
	{
		acc += 2.0*func_1a(a + k*width);
	}
	return (width/2)*acc;
}
// Parallelized trapezoidal rule function.
// Change around the commented lines to run it with other math functions.
// Parallelized trapezoidal rule function.
// Each thread tid < n handles one subinterval [a + tid*delta, a + (tid+1)*delta]
// and stores f(left) + f(right) into d_output[tid]; the host then sums the
// array and multiplies by delta/2 to complete the composite formula.
// Change around the commented lines to run it with other math functions.
__global__ void trapezoidal_kernel( float a, float b, float n, float* d_output )
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
float delta = (b-a)/n;
float s = a + (float)tid * delta;
if( tid < n )
{
d_output[tid] = func_1b(s) + func_1b(s + delta);
// d_output[tid] = func_2b(s) + func_2b(s + delta);
// d_output[tid] = func_3b(s) + func_3b(s + delta);
}
}
// Integrates func_1 over [0, 1] with n trapezoids, serially on the CPU and
// in parallel on the GPU, and prints both results for comparison.
int main()
{
	// Establish the CUDA context up front so the later cudaMalloc does not
	// absorb the (multi-second) startup cost.
	cudaFree(0);
	cudaError_t err;
	float a = 0.0f;             // interval start
	float b = 1.0f;             // interval end
	int n = 10000;              // number of trapezoids
	float delta = (b-a)/n;
	float parallel_result = 0.0f;
	float* h_kernel_output = (float*)malloc(n * sizeof(float));
	float* d_kernel_output;
	cout.precision(5);
	// print out host function result
	cout << "Function 1: " << endl;
	cout << "Serial: Value of integral is " << trapezoidal(a, b, n) << endl;
	err = cudaMalloc( (void**) &d_kernel_output, n * sizeof(float) );
	gpu_err_chk(err);
	// (no host-to-device copy needed: the kernel writes every element that
	// is read back, so uploading the uninitialized host buffer was wasted)
	dim3 dimBlock (256);
	// Fix: derive the grid from n instead of hard-coding 40 blocks, so the
	// launch stays correct if n changes.
	dim3 dimGrid ((n + dimBlock.x - 1) / dimBlock.x);
	trapezoidal_kernel<<<dimGrid, dimBlock>>>( a, b, n, d_kernel_output);
	err = cudaGetLastError();
	gpu_err_chk(err);
	// copy data back from device (cudaMemcpy synchronizes with the kernel)
	err = cudaMemcpy( h_kernel_output, d_kernel_output, n * sizeof(float), cudaMemcpyDeviceToHost );
	gpu_err_chk(err);
	// sum of per-trapezoid contributions, then scale by delta/2
	for( int i=0; i<n; i++ )
	{
		parallel_result += h_kernel_output[i];
	}
	parallel_result *= delta/2.0;
	// print out device function result
	printf("Parallel: Value of integral is %6.4f\n", parallel_result);
	// free up memory
	free(h_kernel_output);
	cudaFree(d_kernel_output);
	return 0;
}
|
539
|
#include <iostream>
// #include <tuple>
// thrust includes
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <cstdlib>
using namespace std;
// ## Part 1: product template
// Generic product of two values of the same type T.
template <class T> T productCalculation (T a, T b) {
    T product = a * b;
    return product;
}
// ## Part 6
// Plain host functor multiplying two ints (warm-up before the tuple version).
struct simpleProd { // first a non-tuple version to start up with structs
    int operator() (int lhs, int rhs) {
        return lhs * rhs;
    }
};
// Functor: product of the two elements of an (int, int) thrust tuple.
// NOTE(review): std::unary_function is deprecated since C++11 and removed in
// C++17; deriving from it only provides typedefs and can be dropped.
struct tupleProd : public unary_function<int,int> {
__host__ __device__ int operator() (const thrust::tuple<int,int> &ab) {
return thrust::get<0>(ab) * thrust::get<1>(ab);
}
};
// ## Part 9
// Templated modulus functor: returns x % wrt. The `%` operator restricts T
// to integral types; the modulus itself stays an int.
template <typename T> struct templateMod : public unary_function<T,T> { // templated version, built this for bugtracking
int wrt; // the modulus ("with respect to")
templateMod(int _wrt) : wrt(_wrt) {}
__host__ __device__ T operator() (const T &x) const {
return x % wrt;
}
};
// Non-templated modulus functor: returns x % wrt (host- and device-callable).
struct sMod {
    int wrt; // the modulus
    sMod(int _wrt) : wrt(_wrt) {}
    __host__ __device__ int operator() (int x) {
        const int remainder = x % wrt;
        return remainder;
    }
};
// ## Part 10
// Modulus over a zipped (number, moduland) tuple: get<0> % get<1>.
struct tupleMod {
__host__ __device__ int operator() (const thrust::tuple<int, int> numberAndModuland) { // "moduland" I invented to give a name to the x in a mod x
return thrust::get<0>(numberAndModuland) % thrust::get<1>(numberAndModuland);
}
};
// Thrust demo driver: template products, transform/reduce/inner_product on
// random device vectors, then zip_iterator and modulus functor examples.
int main (int argc, char** argv) {
/* ####################
* TEMPLATE STUFF
* ##################*/
int a_int = 2;
int b_int = 4;
float a_float = 2.3;
float b_float = 4.2;
double a_double = 2.323;
double b_double = 4.242;
cout << "## Part 1: Multiplying by template" << endl;
cout << "# 1.1 - int: ";
cout << a_int << " * " << b_int << " = " << productCalculation(a_int, b_int) << endl;
cout << "# 1.2 - float: ";
cout << a_float << " * " << b_float << " = " << productCalculation(a_float, b_float) << endl;
cout << "# 1.3 - double: ";
cout << a_double << " * " << b_double << " = " << productCalculation(a_double, b_double) << endl;
/* #####################
* THRUST
* ####################*/
cout << "## Part 2: First Usage of Thrust -- make two device vectors with randomized entries" << endl;
int sizeOfVector = 100; // use array with 100 entries or ...
if (argc > 1) sizeOfVector = atoi(argv[1]); // use what has been specified by cmd line
thrust::host_vector<int> h_vec1(sizeOfVector); // initialize host vectors
thrust::host_vector<int> h_vec2(sizeOfVector);
srand(23);
for (int i = 0; i < sizeOfVector; ++i) { // fill host vectors randomized
h_vec1[i] = rand() % 100;
h_vec2[i] = rand() % 100;
cout << "# Filled random numbers, h_vec1[" << i << "] = " << h_vec1[i] << ", h_vec2[" << i << "] = " << h_vec2[i] << endl;
}
thrust::device_vector<int> d_vec1 = h_vec1; // copy host vectors to device vectors
thrust::device_vector<int> d_vec2 = h_vec2;
cout << "## Part 3: Use transform() on third device_vector" << endl;
thrust::device_vector<int> d_res(sizeOfVector);
thrust::transform(d_vec1.begin(), d_vec1.end(), d_vec2.begin(), d_res.begin(), thrust::multiplies<int>());
cout << "## Part 4: Use reduce() on third device_vector" << endl;
// NOTE(review): unqualified reduce() resolves to thrust::reduce via
// argument-dependent lookup on the device iterators.
int total = reduce(d_res.begin(), d_res.end()); // ", (int) 0, thrust::plus<int>()" has been reduced ;)
cout << "# Reduction is = " << total << endl;
cout << "## Part 5: Use inner_product() for single-kernel dot product" << endl;
int innertotal = thrust::inner_product(d_vec1.begin(), d_vec1.end(), d_vec2.begin(), 0);
cout << "# Inner Product is = " << innertotal << endl;
cout << "## Part 6: Multiply by struct" << endl;
simpleProd mult;
cout << "# Simple struct product of " << a_int << " and " << b_int << " is " << mult(a_int, b_int) << endl;
tupleProd fancyMult;
cout << "# Fancy struct product of " << a_int << " and " << b_int << " is " << fancyMult(thrust::make_tuple(a_int,b_int)) << endl;
cout << "## Part 7: zip_iterator() " << endl;
// NOTE(review): `zero` is unused — the transform_reduce below passes the
// scalar literal 0 as its initial value instead.
thrust::tuple<int, int> zero = thrust::make_tuple(0, 0);
int zippedProd = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(d_vec1.begin(), d_vec2.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_vec1.end(), d_vec2.end())),
tupleProd(),
0,
thrust::plus<int>());
cout << "# Transformed reduction using zip_it() " << zippedProd << endl;
cout << "## Part 8: Modulus struct" << endl;
// see above
cout << "## Part 9: transform_reduce() with modulus struct" << endl;
int modWhat = 2; // mod what?!
cout << "# Number of odd entries " << thrust::transform_reduce(d_vec1.begin(), d_vec1.end(), sMod(modWhat), 0, thrust::plus<int>()) << endl;
cout << "## Part 10: Modulus by tuple" << endl;
// constant_iterator supplies the fixed modulus 2 as the second tuple element
thrust::constant_iterator<int> constIt(2);
int tempNumber = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(d_vec1.begin(),constIt)),thrust::make_zip_iterator(thrust::make_tuple(d_vec1.end(),constIt)),
tupleMod(),
0,
thrust::plus<int>());
cout << "# Number of odd entries " << tempNumber << endl;
return 0;
}
|
540
|
// Clamp an RGB triple to [0, 255] and store it into a 3-byte pixel buffer.
// Fixes: the original tested `r` for all three channels, so green and blue
// were clamped based on red's value. Negative components are now clamped to
// zero as well, matching the vec3 overload of color() in this file.
__device__ void color(float r, float g, float b, unsigned char* buffer){
if(r < 0)
buffer[0] = 0;
else if(r > 255)
buffer[0] = 255;
else
buffer[0] = (unsigned char)(r);
if(g < 0)
buffer[1] = 0;
else if(g > 255)
buffer[1] = 255;
else
buffer[1] = (unsigned char)(g);
if(b < 0)
buffer[2] = 0;
else if(b > 255)
buffer[2] = 255;
else
buffer[2] = (unsigned char)(b);
}
// 2x2 matrix stored as two row arrays: N = row 0, M = row 1.
// NOTE(review): `__device__` on a struct *definition* has no effect (nvcc
// warns); the qualifier matters only on the member functions.
__device__ struct mat2{
float N[2];
float M[2];
// row-major constructor: (a b / c d)
__device__ mat2(float a, float b, float c, float d)
{
N[0] = a;
N[1] = b;
M[0] = c;
M[1] = d;
}
};
// Minimal 2-component float vector for device-side shader math.
__device__ struct vec2{
    float x;
    float y;
    // default ctor leaves members uninitialized on purpose
    __device__ vec2 (){}
    __device__ vec2 (float x, float y): x(x), y(y){}
    // splat constructor: both components set to a
    __device__ vec2 ( float a): x(a),y(a){}
    // component-wise addition
    __device__ vec2 operator+(const vec2& rhs) const
    {
        return {x+rhs.x, y+rhs.y};
    }
    // component-wise subtraction
    __device__ vec2 operator-(const vec2& rhs) const
    {
        return {x-rhs.x, y-rhs.y};
    }
};
//vec2 calculations except operators
// Euclidean length of v.
__device__ float length(vec2 v){
return sqrt(v.x*v.x + v.y*v.y);}
// Euclidean distance between points posa and posb.
__device__ float dist(vec2 posa, vec2 posb){
float dx,dy;
dx = posa.x - posb.x;
dy = posa.y -posb.y;
return sqrt(dx*dx + dy*dy);}
// Component-wise absolute value.
__device__ vec2 abs(vec2 v){
return {abs(v.x),abs(v.y)};}
//vec3 can begin
// 3-component float vector with the usual component-wise arithmetic.
__device__ struct vec3 {
float x;
float y;
float z;
//Constructors in different ways
__device__ vec3 (){}
__device__ vec3 (float x, float y, float z):x(x),y(y),z(z){}
// splat constructor: all components set to a
__device__ vec3(float a):x(a),y(a),z(a){}
// extend a vec2 with an explicit z
__device__ vec3(vec2 v, float z):x(v.x),y(v.y),z(z){}
__device__ vec3 operator+(const vec3& a) const{
return {a.x + x, a.y+y, a.z+z};}
__device__ vec3 operator-(const vec3& a) const{
return {x-a.x,y-a.y, z-a.z};}
// Hadamard (component-wise) product, not a dot product
__device__ vec3 operator*(const vec3& a) const{
return {a.x*x, a.y*y , a.z*z };
}
__device__ vec3 operator*(const float a) const{
return {x*a, y*a, z*a};}
__device__ vec3 operator/(const float a) const{
return {x/a, y/a, z/a};}
};
// Cross product a x b.
__device__ vec3 cross(vec3 a, vec3 b){
    float cx = a.y*b.z - b.y*a.z;
    float cy = a.z*b.x - b.z*a.x;
    float cz = a.x*b.y - b.x*a.y;
    return {cx, cy, cz};
}
// Clamp the components of c to [0, 255] and store them as the three bytes
// of a pixel (x -> buffer[0], y -> buffer[1], z -> buffer[2]).
__device__ void color(vec3 c, unsigned char* buffer){
    buffer[0] = (c.x < 0) ? 0 : (c.x > 255) ? 255 : (unsigned char)(c.x);
    buffer[1] = (c.y < 0) ? 0 : (c.y > 255) ? 255 : (unsigned char)(c.y);
    buffer[2] = (c.z < 0) ? 0 : (c.z > 255) ? 255 : (unsigned char)(c.z);
}
// Dot product of a and b.
__device__ float dot(vec3 a, vec3 b){
return a.x*b.x + a.y*b.y + a.z*b.z;}
// I is the Incident vector (direction) and N the Normal vector of the surface
// (standard reflection formula I - 2*dot(N,I)*N; N assumed normalized).
__device__ vec3 reflect(vec3 I, vec3 N){
return I - N *dot(N,I)*2;
}
// Could these be moved next to the operators?
// Component-wise absolute value.
__device__ vec3 abs(vec3 v){
return {abs(v.x),abs(v.y),abs(v.z)};}
// Euclidean length of v.
__device__ float length(vec3 v){
return sqrt(v.x*v.x + v.y*v.y + v.z*v.z);}
// Euclidean distance between points a and b.
__device__ float dist(vec3 a, vec3 b){
float dx,dy,dz;
dx = a.x-b.x;
dy = a.y-b.y;
dz = a.z-b.z;
return sqrt(dx*dx+dy*dy+dz*dz);}
// v scaled to unit length ("betrag" = magnitude).
// NOTE(review): divides by zero for the zero vector.
__device__ vec3 normalize(vec3 v){
float betrag = length(v);
return {v.x/betrag,v.y/betrag,v.z/betrag};}
//not sure about the following 2 formulas, a max between a vector and a float seems weird to me
// Scalar max helper: returns the larger of a and mx.
__device__ float mb(float a, float mx){
return a > mx ? a : mx;
}
// Component-wise max of a vec3 against a scalar floor.
__device__ vec3 max(vec3 v, float d){
return {mb(v.x,d),
mb(v.y,d),
mb(v.z,d)};}
//i still wonder what i need a 4th dimension for, but we will see
// 4-component float vector.
__device__ struct vec4{
float x;
float y;
float z;
float w;
__device__ vec4(){}
__device__ vec4(float x, float y, float z, float w):x(x),y(y),z(z),w(w){}
// Fix: z must come from v.z (was v.y — a copy-paste slip that silently
// duplicated the y component into z).
__device__ vec4(vec3 v, float w): x(v.x), y(v.y), z(v.z), w(w){}
// splat constructor: all components set to a
__device__ vec4(float a):x(a),y(a),z(a),w(a){}
// concatenate two vec2s: (v1.x, v1.y, v2.x, v2.y)
__device__ vec4(vec2 v1, vec2 v2): x(v1.x),y(v1.y),z(v2.x),w(v2.y){}
};
//init global rot variable and the functions to set during runtime
//remember that __host__ function always has to be inside extern "C" to be accessed by C-types
//btw check if there might be a less dirty way to access the __device__ variable from __host__ function
// Rotation angle shared by all kernels; the host updates it via a one-thread
// kernel launch (cudaMemcpyToSymbol would be an alternative).
__device__ unsigned short int rot = 0;
// Single-thread setter kernel for `rot`.
__global__ void rotation(unsigned short int angle){
rot = angle;}
// C-callable host wrapper; note the <<<1,1>>> launch is asynchronous.
extern "C"{__host__ void rotate(unsigned short int angle){
rotation<<<1,1>>>(angle);}}
//same here with the frame
__device__ float frame = 0; // current frame number
__device__ float sinfr = 0; // sin of the frame angle, cached once per frame
__device__ float cosfr = 0; // cos of the frame angle, cached once per frame
// Single-thread setter: stores the frame number and precomputes sine/cosine
// (the frame is treated as degrees and converted with pi/180).
__global__ void set_frame_g(unsigned int f){
frame = (float)f;
sinfr = sin(frame*M_PI/180);
cosfr = cos(frame*M_PI/180);}
extern "C"{__host__ void set_frame(unsigned int f){
set_frame_g<<<1,1>>>(f);}}
// Mouse position in device memory; updated from the host via a 1-thread kernel.
__device__ vec2 mouse;
// NOTE(review): the *2 scaling presumably maps window coordinates to the
// shader's coordinate system — confirm against the caller.
__global__ void set_mouse_g(float x, float y){
mouse.x = x*2;
mouse.y = y*2;}
extern "C"{__host__ void set_mouse(float x, float y){
set_mouse_g<<<1,1>>>(x,y);}}
__device__ vec2 window;   // window size in pixels
__device__ vec2 windowD2; // half the window size, cached to save per-pixel divides
// Single-thread setter for the window dimensions.
__global__ void set_window_g(int x, int y){
window.x = (float)(x);
window.y = (float)(y);
windowD2.x = (float)(x)/2;
windowD2.y = (float)(y)/2;}
extern "C"{ __host__ void set_window(int x, int y){
set_window_g<<<1,1>>>(x,y);}}
//nice functionalities for floats to have in Shady programming
// Fractional part of f: f - floor(f).
__device__ float fract(float f){
return f - floor(f);}
// NOTE(review): this is NOT GLSL's step(edge, x) (x < edge ? 0 : 1); it
// compares magnitudes and returns 1 when |a| < |b| — confirm intent.
__device__ float step(float a, float b){
if (abs(a) < abs(b))
return 1;
else
return 0;}
// Clamp x into [minVal, maxVal].
__device__ float clamp(float x, float minVal, float maxVal){
return min(max(x,minVal),maxVal);}
// Select the vec2 with the smaller x component.
__device__ vec2 min(vec2 v1, vec2 v2){
if (v1.x<v2.x)
return v1;
else
return v2;
// vec2.y is meant to carry the material id ("solid/glass/mirror"); the
// distance functions would have to change from float to vec2 for that.
// Then a new function for raymarching inside glass,
// and one for reflection.
// Alternatively, build raymarch functions first for glass, then mirrors, then solids.
}
// Select the vec2 with the larger x component.
__device__ vec2 max(vec2 v1, vec2 v2){
    return (v1.x > v2.x) ? v1 : v2;
}
// Linear interpolation between v1 and v2 by a (GLSL-style mix).
__device__ float mix(float v1,float v2, float a){
return v1* (1-a) + v2*a;}
//define smin for vec2 returns....
// Polynomial smooth minimum of a and b with blending radius k.
// NOTE(review): the canonical formulation clamps h into [0.0, 1.0]; the 0.1
// lower bound here slightly biases the result — confirm it is intentional.
__device__ float smin(float a, float b, float k){//smooth min, very nice
float h = clamp(0.5 + 0.5 * (b-a)/k,0.1,1.0);
return mix(b,a,h) - k*h*(1.0-h);}
|
541
|
#include <cuda_runtime.h>
// Threshold the red channel of each pixel and write the result to all three
// color channels (alpha untouched), producing a black/white image in place.
// Assumes the 2D grid exactly covers the image: the linear index is derived
// from gridDim.x*blockDim.x and there is no bounds check — TODO confirm that
// width/height are multiples of the block size (the host uses width/8 x height/8).
__global__ void binarizeKernel(uchar4* pData, unsigned char threshold)
{
// get the position for the current thread
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory address (row-major, row width = launched grid width)
const unsigned int tid = y * (gridDim.x * blockDim.x) + x;
// get binarization result
unsigned char value = pData[tid].x > threshold ? 255 : 0;
// write the value back to the global memory
pData[tid].x = value;
pData[tid].y = value;
pData[tid].z = value;
}
// Host wrapper: binarize a width x height uchar4 image on the GPU.
// With 8x8 blocks and integer division, any remainder rows/columns are not
// processed by the kernel (those output pixels keep the copied input values).
void binarize(uchar4* pDataIn, uchar4* pDataOut, int width, int height, unsigned char threshold)
{
    const unsigned int numBytes = sizeof(uchar4) * width * height;
    // device buffer holding the image
    uchar4* d_pixels;
    cudaMalloc((void **) &d_pixels, numBytes);
    // upload the input image
    cudaMemcpy(d_pixels, pDataIn, numBytes, cudaMemcpyHostToDevice);
    // 8x8 threads per block; grid truncated to whole blocks
    dim3 blockDims(8, 8);
    dim3 gridDims(width / blockDims.x, height / blockDims.y);
    binarizeKernel<<<gridDims, blockDims>>>(d_pixels, threshold);
    // download the result
    cudaMemcpy(pDataOut, d_pixels, numBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_pixels);
}
|
542
|
#include <stdio.h>
#include <time.h>
#define TPB 256
#define ARRAY_SIZE 1000000
// GPU SAXPY: each thread computes one element y[i] = a*x[i] + y[i].
// Guarded so surplus threads in the last block do nothing.
__global__ void saxpy_gpu(int n, float a, float* d_x, float* d_y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
d_y[i] = a * d_x[i] + d_y[i];
}
// CPU reference SAXPY: y[i] = a*x[i] + y[i] for every i in [0, n).
void saxpy_cpu(int n, float a, float* x, float* y)
{
    for (int idx = 0; idx < n; ++idx)
    {
        y[idx] = a * x[idx] + y[idx];
    }
}
// Runs SAXPY on both GPU and CPU with identical inputs, times the CPU pass,
// and verifies the two results agree element-wise.
int main(void)
{
const float a = 1.0f;
float* d_x, * d_y, * x, * y;
float* result_y;
clock_t start, end;
int flag = 0; // set to 1 when any GPU/CPU element disagrees
double cpu_time_used = 0.0;
x = (float*)malloc(ARRAY_SIZE * sizeof(float));
y = (float*)malloc(ARRAY_SIZE * sizeof(float));
result_y = (float*)malloc(ARRAY_SIZE * sizeof(float));
cudaMalloc(&d_x, ARRAY_SIZE * sizeof(float));
cudaMalloc(&d_y, ARRAY_SIZE * sizeof(float));
for (int i = 0; i < ARRAY_SIZE; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
// ceil-divide the grid (255 == TPB - 1 with TPB = 256)
saxpy_gpu<<<(ARRAY_SIZE + 255) / TPB, TPB >>> (ARRAY_SIZE, a, d_x, d_y);
cudaDeviceSynchronize();
printf("Calculation on GPU Done \n");
cudaMemcpy(result_y, d_y, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
start = clock();
saxpy_cpu(ARRAY_SIZE, a, x, y);
end = clock();
cpu_time_used += (double)(end - start)/ CLOCKS_PER_SEC ;
printf("Calculation on CPU Done \n");
printf("CPU saxpy: %f seconds.\n", cpu_time_used);
// Fix: the success message used to print unconditionally, even after
// mismatches; `flag` (previously declared but never used) now gates it.
for (int i = 0; i < ARRAY_SIZE; i++)
{
if (result_y[i] != y[i])
{
printf("Error in results \n");
flag = 1;
}
}
if (flag == 0)
printf("Comparing the output for each implementation Correct!");
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
free(result_y);
return 0;
}
|
543
|
// test the size of shared memory for each block
#include <iostream>
#include <cstdio>
using namespace std;
#define N 2500
// Probe kernel: statically allocates two N-element double arrays in shared
// memory (2 * 2500 * 8 B = 40 000 B per block) plus some busywork so the
// allocation is not optimized away.
// NOTE(review): x and y are never initialized — the sums are garbage by
// design; only the shared-memory footprint matters for this size test.
__global__
void fun()
{
__shared__ double x[N], y[N];
for (int i=0; i<400; ++i)
for (int j=0; j<N; ++j)
y[j] += x[j];
}
// Launch the probe kernel with a single thread and wait for it. If the
// requested shared memory exceeds the per-block limit, the launch fails
// (no error check here — run under compute-sanitizer to see it).
int main()
{
fun<<<1,1>>>();
cudaDeviceSynchronize();
}
|
544
|
//*****************************************************************************
//Projet HPC fusion et trie de tableaux sur GPU
//Auteur: ROBIN Clement et SAULNIER Solene
//Promo: MAIN5
//Date: decembre 2020
//Question 3
//*****************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 536870912
#define threadsPerBlock 1024
#define nbBlocks 65535
//*****************************************************************************
//Fonctions CPU de verification
//*****************************************************************************
// Verify tab is sorted ascending: returns -1 when sorted, otherwise the
// first index i where tab[i] > tab[i+1].
int verif_trie(int *tab,int size)
{
    for (int idx = 0; idx + 1 < size; ++idx)
        if (tab[idx] > tab[idx+1])
            return idx;
    return -1;
}
//*****************************************************************************
//Fonctions GPU (merge tableau)
//*****************************************************************************
// Merge-path partitioning: for each output position i of the merged array,
// binary-search along diagonal i of the (A, B) grid to decide whether M[i]
// comes from A or B. On exit Path[i] holds the source flag (1 = A, 0 = B)
// and Path[i+size_M] holds the index within that source — so Path must hold
// 2*size_M ints. Grid-stride loop: any launch covers all size_M diagonals.
__device__ void pathBig_k(int *A, int *B, int *Path, int size_A, int size_B, int size_M)
{
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i<size_M; i = i+blockDim.x*gridDim.x)
{
// K and P are the two ends of the search range on the diagonal; Q is
// the probed midpoint. [0] indexes into B, [1] indexes into A.
int K[2],P[2],Q[2];
int offset;
if (i>size_A)
{
K[0]=i-size_A;
K[1]=size_A;
P[0]=size_A;
P[1]=i-size_A;
}
else
{
K[0]=0;
K[1]=i;
P[0]=i;
P[1]=0;
}
while (1)
{
offset=abs(K[1]-P[1])/2;
Q[1]=K[1]-offset;
Q[0]=K[0]+offset;
// Is Q at or past the merge path boundary from above?
if (Q[1] >= 0 && Q[0] <= size_B && (Q[1]== size_A || Q[0]==0 || A[Q[1]]>B[Q[0]-1]))
{
// ...and not past it from below? Then Q is the crossing point.
if (Q[0]==size_B || Q[1]==0 || A[Q[1]-1]<=B[Q[0]])
{
// ties (A == B) are broken toward A for stability
if (Q[1]<size_A && (Q[0]==size_B || A[Q[1]]<=B[Q[0]]))
{
Path[i]=1;
Path[i+size_M]=Q[1];
}
else
{
Path[i]=0;
Path[i+size_M]=Q[0];
}
break;
}
else
{
// crossing is further along the diagonal: shrink from K's side
K[0]=Q[0]+1;
K[1]=Q[1]-1;
}
}
else
{
// crossing is earlier on the diagonal: shrink from P's side
P[0]=Q[0]-1;
P[1]=Q[1]+1;
}
}
}
}
// Scatter phase of the merge: using the flags/indices written by pathBig_k,
// copy each merged element from A (flag 1) or B (flag 0) into M. Any other
// flag value means the Path array is corrupted.
__device__ void mergeBig_k(int *A, int *B, int *M,int *Path, int size_A, int size_B, int size_M)
{
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i<size_M; i = i+blockDim.x*gridDim.x)
{
if (Path[i]==1)
M[i]=A[Path[i+size_M]];
else if (Path[i]==0)
M[i]=B[Path[i+size_M]];
else
printf("ERROR thread num %d block %d",i,blockIdx.x);
}
}
// Device entry point: compute the merge path for (A, B) into Path, then
// scatter the merged values into M. Both helpers use the same grid-stride
// index mapping, so thread i only reads back the Path entries it wrote
// itself — which is why no grid-wide barrier is needed between the calls.
__global__ void sortManager_GPU(int *A, int *B, int *M,int *Path, int size_A, int size_B, int size_M)
{
pathBig_k(A, B, Path, size_A, size_B, size_M);
mergeBig_k(A, B, M, Path, size_A, size_B, size_M);
}
// Merge one slice of h_M on the GPU: splits slice i into sub-arrays A and B,
// uploads them, runs the merge-path kernel, and copies the merged result back.
// Fix: d_Path_tmp must hold 2*size_M ints — pathBig_k writes both Path[i]
// and Path[i+size_M], so the original size_M-int allocation overflowed.
// NOTE(review): per-call cudaMalloc/cudaFree is expensive; reusing buffers
// across calls would speed this up considerably.
void sortManager_CPU(int *h_M,int h_size_A,int h_size_B,int h_slice_size,int i, int numThreads, int numBlocks)
{
/* Host-side buffers */
int h_size_M_tmp= h_size_A+h_size_B;
int *h_A;
int *h_B;
int *h_M_tmp;
h_A=(int *)malloc(h_size_A*sizeof(int));
h_B=(int *)malloc(h_size_B*sizeof(int));
h_M_tmp=(int *)malloc(h_size_M_tmp*sizeof(int));
/* Fill A and B from the two halves of slice i */
for (int j=0; j<h_size_A; j++)
h_A[j] = h_M[i*h_slice_size+j];
for (int j=0; j<h_size_B; j++)
h_B[j] = h_M[i*h_slice_size+j+h_size_A];
/* Device-side buffers */
int *d_A;
int *d_B;
int *d_M_tmp;
int *d_Path_tmp;
cudaMalloc(&d_A,h_size_A*sizeof(int));
cudaMalloc(&d_B,h_size_B*sizeof(int));
cudaMalloc(&d_M_tmp,h_size_M_tmp*sizeof(int));
/* Fix: Path stores flag + index per output element -> 2*size_M ints */
cudaMalloc(&d_Path_tmp,2*h_size_M_tmp*sizeof(int));
/* Upload */
cudaMemcpy(d_A, h_A,h_size_A*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B,h_size_B*sizeof(int), cudaMemcpyHostToDevice);
/* Merge one slice of M (larger array passed first) */
if (h_size_A<h_size_B)
{
sortManager_GPU<<<numBlocks,numThreads>>>(d_B, d_A, d_M_tmp, d_Path_tmp, h_size_B, h_size_A, h_size_M_tmp);
cudaDeviceSynchronize();
}
else
{
sortManager_GPU<<<numBlocks,numThreads>>>(d_A, d_B, d_M_tmp, d_Path_tmp, h_size_A, h_size_B, h_size_M_tmp);
cudaDeviceSynchronize();
}
/* Download the merged slice */
cudaMemcpy(h_M_tmp, d_M_tmp, h_size_M_tmp*sizeof(int), cudaMemcpyDeviceToHost);
/* Copy h_M_tmp back into h_M */
for (int j=0; j<h_size_M_tmp; j++)
h_M[i*h_slice_size+j]=h_M_tmp[j];
/* Free memory */
free(h_A);
free(h_B);
free(h_M_tmp);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_M_tmp);
cudaFree(d_Path_tmp);
}
//*****************************************************************************
//MAIN
//*****************************************************************************
int main(int argc, char const *argv[])
{
//srand (time (NULL));
srand (42);
/* Thread/block configuration (defaults, overridable from the command line) */
int numThreads=threadsPerBlock;
int numBlocks=nbBlocks;
/* Size of the array to sort */
int h_taille_M=10000;
/* Command-line options: --s <size>, --threads <t>, --blocks <b> */
for (int i=0; i<argc-1; i=i+1)
{
if (strcmp(argv[i],"--s")==0 && atoi(argv[i+1])<N )
h_taille_M=atoi(argv[i+1]);
if (strcmp(argv[i],"--threads")==0 && atoi(argv[i+1])<threadsPerBlock )
numThreads=atoi(argv[i+1]);
if (strcmp(argv[i],"--blocks")==0 && atoi(argv[i+1])<nbBlocks )
numBlocks=atoi(argv[i+1]);
}
/* Host array to sort */
int *h_M;
h_M=(int *)malloc(h_taille_M*sizeof(int));
/* NOTE(review): d_M is allocated but never used by the sort below */
int *d_M;
cudaMalloc(&d_M,h_taille_M*sizeof(int));
/* Fill with random values */
for (int i=0; i<h_taille_M;i++)
h_M[i]=rand()%10000;
/* Timer */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* Bottom-up merge sort of M: doubles the slice size each pass */
int h_slice_size=1;
int h_slice_number=h_taille_M/2;
int h_slice_reste_precedent=0;
int h_slice_reste=0;
cudaEventRecord(start);
while (h_slice_number > 0)
{
/* Update slice size and counts for this pass */
h_slice_size=2*h_slice_size;
h_slice_reste_precedent=h_slice_reste;
h_slice_reste=h_taille_M%h_slice_size;
h_slice_number=h_taille_M/h_slice_size;
for (int i=0; i<h_slice_number; i++)
sortManager_CPU(h_M,h_slice_size/2,h_slice_size/2,h_slice_size,i,numThreads,numBlocks);
/* Merge the leftover tail when both this pass and the previous left one */
if (h_slice_reste_precedent!=0 && h_slice_reste!=0)
{
int h_taille_A=h_slice_reste-h_slice_reste_precedent;
int h_taille_B=h_slice_reste_precedent;
sortManager_CPU(h_M,h_taille_A,h_taille_B,h_slice_size,h_slice_number,numThreads,numBlocks);
}
}
cudaDeviceSynchronize();
cudaEventRecord(stop);
/* Report timing */
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
fprintf(stderr,"sort Taille_M: %d, nbthreads: %d, numblocks: %d, Temps: %.5f, verif: %d\n", h_taille_M, numThreads, numBlocks, ms,verif_trie(h_M,h_taille_M));
/* Verification */
if (verif_trie(h_M,h_taille_M)==-1)
printf("ok tableau trie");
else
printf("KO recommencer %d ",verif_trie(h_M,h_taille_M) );
/* Cleanup (fix: d_M and the CUDA events were previously leaked) */
free(h_M);
cudaFree(d_M);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
545
|
#include "stdio.h"
#include<iostream>
#include <cuda.h>
#include <cuda_runtime.h>
//Defining two constants
__constant__ float constant_f;
__constant__ float constant_g;
#define N 5
//Kernel function for using constant memory
// Computes d_out[tid] = constant_f * d_in[tid] + constant_g, one thread per
// element. Indexing uses threadIdx.x only, so it assumes a single block.
__global__ void gpu_constant_memory(float *d_in, float *d_out)
{
//Getting thread index for current kernel
int tid = threadIdx.x;
d_out[tid] = constant_f*d_in[tid] + constant_g;
}
// Demonstrates __constant__ memory: uploads two scalars with
// cudaMemcpyToSymbol, runs y = f*x + g on the device, prints the results.
int main()
{
    float h_x[N], h_y[N];
    float h_f = 5, h_g = 10;
    float *d_x, *d_y;
    // input: x[i] = i
    for(int idx = 0; idx < N; idx++)
        h_x[idx] = idx;
    cudaMalloc(&d_x, N*sizeof(float));
    cudaMalloc(&d_y, N*sizeof(float));
    // upload the constants (one call with explicit offset/kind, one defaulted)
    cudaMemcpyToSymbol(constant_f, &h_f, sizeof(float), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(constant_g, &h_g, sizeof(float));
    cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
    // one block, one thread per element
    gpu_constant_memory<<<1,N>>>(d_x, d_y);
    cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(int idx = 0; idx < N; idx++)
        printf("%0.3f * %0.3f + %0.3f = %0.3f\n", h_x[idx], h_f, h_g, h_y[idx]);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}
|
546
|
#include <stdio.h>
#include <sys/time.h>
// #include <demo_util.h>
// #include <cuda_util.h>
#define CLOCK_RATE 1076000 // Titan
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + (double)now.tv_usec*1e-6;
}
// Busy-wait roughly t seconds of GPU time using the clock64() tick counter.
// CLOCK_RATE is in kHz (hard-coded for a Titan above), so CLOCK_RATE*1000
// converts ticks to seconds — TODO confirm against prop.clockRate at runtime.
__device__ void sleep(float t)
{
clock_t t0 = clock64();
clock_t t1 = t0;
while ((t1 - t0)/(CLOCK_RATE*1000.0f) < t)
{
t1 = clock64();
}
}
// Each launched thread busy-waits ~1 second of GPU wall time.
__global__ void worker()
{
sleep(1.0);
}
// Launches one 16-thread block per SM of device 0, times the busy-wait
// kernel from the host, and prints the launch geometry and elapsed time.
int main(int argc, char** argv)
{
cudaDeviceProp prop;
clock_t clock_rate;
int mp;
double etime, start;
cudaGetDeviceProperties(&prop, 0); /* Only look at first processor */
printf("Name: %s\n", prop.name );
mp = prop.multiProcessorCount;
// NOTE(review): clock_rate is printed but sleep() uses the hard-coded
// CLOCK_RATE macro; on a different GPU the two can disagree.
clock_rate = prop.clockRate;
printf("Clock rate = %d\n",clock_rate);
int threads_per_block = 16;
int blocks_per_sm = 1;
dim3 block(threads_per_block);
dim3 grid(mp*blocks_per_sm);
start = cpuSecond();
worker<<<grid,block>>>();
cudaDeviceSynchronize();
etime = cpuSecond() - start;
int total_threads = block.x*grid.x;
printf("Device has %d SMs\n",mp);
printf("%27s %12d\n", "Threads per block",block.x*block.y);
printf("%27s %12d\n", "Total number of blocks",grid.x);
printf("%27s %12d\n", "Total number of threads",total_threads);
printf("%27s %12.3f (s)\n","GPU Kernel Time (scaled)", etime);
printf("\n");
cudaDeviceReset();
}
|
547
|
// a cuda app. we will convert this to opencl, and run it :-)
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda_runtime.h>
// Store `value` into data[idx]; only lane 0 of each block performs the
// write (with a multi-block launch the same store is repeated per block).
__global__ void setValue(float *data, int idx, float value) {
    if(threadIdx.x != 0) {
        return;
    }
    data[idx] = value;
}
// Writes 123 into buffer1[2], snapshots buffer1 into buffer2 on-device,
// overwrites buffer1, then verifies the snapshot kept the original value.
int main(int argc, char *argv[]) {
    int N = 1024;
    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));
    float *gpuFloats2;
    cudaMalloc((void**)(&gpuFloats2), N * sizeof(float));
    setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuFloats1, 2, 123.0f);
    // device-to-device snapshot of the first 4 floats
    cudaMemcpy(gpuFloats2, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToDevice);
    setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuFloats1, 2, 444.0f);
    float hostFloats[4];
    // blocking copy: also synchronizes with the preceding launches
    cudaMemcpy(hostFloats, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cout << "This should be 123:" << endl;
    cout << "hostFloats[2] " << hostFloats[2] << endl;
    assert(hostFloats[2] == 123);
    cudaFree(gpuFloats1);
    cudaFree(gpuFloats2); // fix: this buffer was previously leaked
    return 0;
}
|
548
|
// Hello Cuda World Program //
/*
* Author: Malhar Bhatt
* Subject : High Performance Computing
*
*/
#include <iostream>
/**
* Empty Function named Kernel() qualified with __global__
*
*/
// Deliberately empty kernel: demonstrates a minimal __global__ launch.
__global__ void kernel (void)
{
}
int main(void)
{
kernel<<<1,1>>>(); // Calling Empty Function
// NOTE(review): only <iostream> is included; printf/system rely on it
// pulling in <cstdio>/<cstdlib> transitively — add explicit includes.
printf("Hello Cuda World !!!\n"); // Printing Hello Cuda World
// NOTE(review): system("pause") is Windows-specific; it fails (harmlessly)
// on other platforms.
system("pause");
return 0;
}
|
549
|
#include "includes.h"
// Element-wise product y[i] = x1[i] * x2[i] over `size` elements, using a
// grid-stride loop so any launch configuration covers the whole array.
__global__ void cudaSmult_kernel(unsigned int size, const float *x1, const float *x2, float *y)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int idx = first; idx < size; idx += step) {
        y[idx] = x1[idx] * x2[idx];
    }
}
|
550
|
// Homework_5
// Problem_4
// change the array size to 8000. Check if answer to problem 3 still works.
// RUN as
// nvcc prob4.cu
// ./a.out
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Kernel: zero-fill arr[0..size) with a grid-stride loop.
__global__
void initialize(int *arr, int size){
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    for (int idx = first; idx < size; idx += stride)
        arr[idx] = 0;
}
// Kernel: add each element's own index to it (arr[i] += i), grid-stride.
__global__
void addIValue(int *arr, int size){
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    for (int idx = first; idx < size; idx += stride)
        arr[idx] += idx;
}
// Print all `size` elements of ar on one line, framed by blank lines.
void print(int *ar, int size){
    printf("\n");
    for (int idx = 0; idx < size; ++idx)
        printf("%d ", ar[idx]);
    printf("\n");
}
// Driver: allocates a managed array of 8000 ints, zeroes it on the GPU,
// adds each index to its element, then prints the result from the host.
int main(void){
printf("Homework#5\nProblem 4:Change the array size to 8000. Check if answer to problem 3 still works\n---Successfully initiated---\n---Check the code---");
//here declare int array
int size = 8000;
int *array;
int GPU = 32; // threads per block
int arraySize = size * sizeof(int);
cudaMallocManaged(&array, arraySize);
// ceil-divide so every element is covered
int blocks = (size + GPU - 1) / GPU;
initialize<<<blocks, GPU>>>(array, size);
// same default stream, so this runs after initialize completes
addIValue<<<blocks, GPU>>>(array, size);
// required before the host touches the managed array
cudaDeviceSynchronize();
print(array, size);
cudaFree(array);
cudaDeviceReset();
return 0;
}
|
551
|
// Naive matrix multiply C = A*B executed by a single device thread.
// A is m x k, B is k x n, C is m x n, all row-major. Launched <<<1,1>>>.
__global__ void kernel1(int m, int n, int k, double *d_A, double *d_B, double *d_C){
    // zero the output matrix
    for(int row = 0; row < m; row++)
        for(int col = 0; col < n; col++)
            d_C[row*n + col] = 0.0;
    // accumulate in m-k-n loop order (row of A reused across inner loop)
    for(int row = 0; row < m; row++)
        for(int s = 0; s < k; s++)
            for(int col = 0; col < n; col++)
                d_C[row*n + col] += d_A[row*k + s] * d_B[s*n + col];
}
extern "C" {
// Host wrapper: copies A (m x k) and B (k x n) to the GPU, runs the naive
// single-thread kernel, and copies C (m x n) back to the host.
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
    const int bytesA = m * k * sizeof(double);
    const int bytesB = k * n * sizeof(double);
    const int bytesC = m * n * sizeof(double);
    double *d_A, *d_B, *d_C; // device-side matrices
    cudaMalloc((void**)&d_A, bytesA);
    cudaMalloc((void**)&d_B, bytesB);
    cudaMalloc((void**)&d_C, bytesC);
    // upload the inputs
    cudaMemcpy(d_A, A, bytesA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytesB, cudaMemcpyHostToDevice);
    // one block, one thread
    kernel1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    // download the result
    cudaMemcpy(C, d_C, bytesC, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
}
|
552
|
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
using namespace std;
// Fill A with consecutive indices: A[tid] = tid for every launched thread.
// NOTE(review): no bounds check — the launch configuration must not exceed
// the allocation of A. (Unused by the main below.)
__global__ void initArray( int *A) {
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
A[tid] = tid;
}
// Reverse A in place: each of the num_t threads swaps one contiguous chunk
// of the front half with its mirror in the back half.
// NOTE(review): the chunking assumes size/2 is divisible by num_t; any
// remainder around the middle would be skipped — the caller's 16M elements
// with 128 threads divide exactly.
__global__ void swapArray( int *A, int size, int num_t) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=size/2/num_t*tid; i<size/2/num_t*(tid+1); i++){
int temp = A[i];
A[i] = A[size-1-i];
A[size-1-i] = temp;
}
}
// Fills a 16M-int array with random values, reverses it on the GPU, and
// checks the result element-by-element against a host-built reversed copy.
int main(){
srand(time(0));
int size=16*1024*1024;
//int size=16;
int* ary = new int[size];
int* ans = new int[size];
int num;
// ans holds the expected reversed array
for(int i=0; i<size; i++){
num = rand();
ary[i] = num;
ans[size-i-1] = num;
}
int *d_a;
// thread hierarchy: 8 blocks x 16 threads = 128 worker threads
int num_blocks = 8; int num_th_per_blk = 16;
int num_t = num_blocks*num_th_per_blk;
// allocate device memory and upload the input
size_t memSize;
memSize = size*sizeof(int);
cudaMalloc( (void**) &d_a, memSize);
cudaMemcpy( d_a, ary, memSize, cudaMemcpyHostToDevice);
// launch kernel
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_th_per_blk);
swapArray<<< dimGrid, dimBlock >>>(d_a, size, num_t);
// retrieve results (blocking copy also synchronizes with the kernel)
cudaMemcpy( ary, d_a, memSize, cudaMemcpyDeviceToHost);
for(int i=0; i<size; i++){
if(ary[i]!=ans[i]){
cout << i << " "<< ans[i] << " "<< ary[i] << endl;
cout << "not match" << endl;
break;
}
}
// fix: release device and host buffers (all three were previously leaked)
cudaFree(d_a);
delete[] ary;
delete[] ans;
return 0;
}
|
553
|
//
// A simple function that squares all the elements of an array
// through a call to a CUDA kernel
//
#include "cudasquare.cuh"
// Square function kernel: b[i] = a[i]^2 for every index covered by the grid.
// Arguments:
//   a: input int array on the device
//   b: output squared array on the device
//   n: number of elements
__global__
void square_kernel(int *a, int *b, int n) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return; // tail block: threads past the end do nothing
    const int v = a[idx];
    b[idx] = v * v;
}
// Square function (host side).
// Arguments:
//   ha: input int array on the CPU ('h' = host)
//   n:  number of elements
// Returns a newly allocated array of the squared values (caller frees).
int *square(int *ha, int n)
{
    // result buffer on the host
    int *hb = new int[n];
    // device-side input/output ('d' = device)
    int *da, *db;
    const size_t bytes = n * sizeof(int);
    cudaMalloc(&da, bytes);
    cudaMalloc(&db, bytes);
    // upload the input
    cudaMemcpy(da, ha, bytes, cudaMemcpyHostToDevice);
    // one thread per element; 1024 threads per block, ceil-divided grid
    square_kernel<<<(n+1023)/1024, 1024>>>(da, db, n);
    // download the result (blocking, so it also waits for the kernel)
    cudaMemcpy(hb, db, bytes, cudaMemcpyDeviceToHost);
    // release device buffers
    cudaFree(da);
    cudaFree(db);
    return hb;
}
|
554
|
#include "includes.h"
// Clear the per-point marker (write false) for every point, one thread each.
__global__ void kernel_setAllPointsToRemove(int number_of_points, bool *d_markers_out)
{
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx >= number_of_points)
        return; // surplus threads in the last block
    d_markers_out[idx] = false;
}
|
555
|
// This example is taken from https://cuda-tutorial.readthedocs.io/en/latest/
#include <stdio.h>
// Prints a greeting from the device (device printf is serialized; debug use).
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
printf("Hello World from CPU!\n");
cuda_hello<<<1,1>>>();
// Device printf output is flushed when the host synchronizes, so wait for
// the kernel before exiting.
cudaDeviceSynchronize();
return 0;
}
|
556
|
// OpenMP Jacobi iteration for the 3D Poisson problem on an N^3 grid:
// repeatedly averages the six neighbors of each interior point plus the
// scaled source term f, swapping u/u_old between sweeps, until iter_max.
// NOTE(review): the pointer swap happens on the function's local copies of
// u/u_old; after an odd number of sweeps the newest data is in the array the
// caller passed as u_old — confirm callers account for this.
void cpu_jacobi(double ***u, double ***u_old, double ***f, int N, int delta, int iter_max, int *iter) {
int temp_iter = *iter;
int i, j, k;
// delta is an int, so delta_2 is computed in integer arithmetic before widening
double delta_2 = delta*delta;
double div_val = 1.0/6.0;
double ***temp_pointer;
#pragma omp parallel default(none) \
shared(iter_max, N, f, delta_2, div_val, u, u_old, temp_pointer, temp_iter) \
private(i,j,k)
{
while (temp_iter < iter_max) {
// the omp for carries an implicit barrier at its end
#pragma omp for
for (i = 1; i < N - 1; ++i) {
for (j = 1; j < N - 1; ++j) {
for (k = 1; k < N - 1; ++k) {
u[i][j][k] = (u_old[i - 1][j][k] + u_old[i + 1][j][k]
+ u_old[i][j - 1][k] + u_old[i][j + 1][k]
+ u_old[i][j][k - 1] + u_old[i][j][k + 1]
+ delta_2 * f[i][j][k]) * div_val;
}
}
}
// Pointer redo: one thread swaps the buffers and advances the counter;
// the implicit barrier at the end of `single` publishes the update
// before any thread re-evaluates the while condition.
#pragma omp barrier
#pragma omp single
{
temp_pointer = u;
u = u_old;
u_old = temp_pointer;
temp_iter++;
}
}
}
*iter = temp_iter;
}
|
557
|
#include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <cmath>
#define M 2048
#define W 15
#define w 3
#define threshold 80
using namespace std;
// Gaussian-smooth the M x M raw image with the W x W filter, then run a
// w x w Sobel pass over the smoothed image and threshold the gradient
// magnitude to produce a binary edge image. One thread per output pixel;
// the grid is assumed to tile the image exactly (no rowd/columd bounds
// guard — M must be divisible by block_size).
// NOTE(review): d_smooth_image is written here and then read back for the
// Sobel pass by *neighbouring threads* with no synchronization — across
// blocks none is possible — so edge-detection reads near tile borders may
// observe stale/unwritten data. A correct version would split this into
// two kernel launches.
__global__ void smoothening_kernel(float* d_filter,float* d_raw_image,float* d_hx,float* d_hy,float* d_gx,float* d_gy,float* d_smooth_image,float* d_edged_image,int block_size){
int Bx = blockIdx.x;
int By = blockIdx.y;
int Tx = threadIdx.x;
int Ty = threadIdx.y;
/* defining row and column index tp parse through filters and image*/
int rowd = By* block_size + Ty;
int columd = Bx* block_size + Tx;
/*boundaries checking*/
int rr = rowd - W/2;
int cc = columd - W/2;
float acc = 0.0;
/*convolution for smmothening*/
for(int k = 0; k < W; k++ ){
for(int l = 0; l < W; l++){
if((rr + k) >= 0 && (rr + k) < M && (cc + l) >= 0 && (cc + l) < M){
acc += d_raw_image[(rr + k) * M + (cc + l)] * d_filter[k * W + l];
}
}
// NOTE(review): this store sits inside the k loop, so the pixel is
// rewritten with a partial sum on every outer iteration; only the last
// store holds the full convolution result.
d_smooth_image[rowd * M + columd] = acc;
}
/*convolution for edge detection */
int mm = rowd - w/2;
int nn = columd - w/2;
float acc1 = 0.0;
float acc2 = 0.0;
for(int k = 0; k < w; k++ ){
for(int l = 0; l < w; l++){
if((mm + k) >= 0 && (mm + k) < M && (nn + l) >= 0 && (nn + l) < M){
// Reads neighbours' d_smooth_image values — see race note above.
acc1 += d_smooth_image[(mm + k) * M + (nn + l)] * d_hx[k * w + l];
acc2 += d_smooth_image[(mm + k) * M + (nn + l)] * d_hy[k * w + l];
}
}
// Same partial-store pattern as above (final iteration wins).
d_gx[rowd * M + columd] = acc1;
d_gy[rowd * M + columd] = acc2;
}
// gradient magnitude of spatial domains
d_edged_image[rowd * M + columd] = sqrt(pow(d_gx[rowd * M + columd], 2) + pow(d_gy[rowd * M + columd], 2));
// Binarize against the global threshold (80).
if(d_edged_image[rowd * M + columd] > threshold){d_edged_image[rowd * M + columd] = 255;}
else{d_edged_image[rowd * M + columd] = 0;}
}
// Driver: builds a Gaussian filter and Sobel kernels, loads a raw 2048x2048
// 8-bit image, runs the combined smoothing/edge kernel on the GPU, and
// writes both result images back to disk.
// argv[1]: CUDA block edge length (threads per block = block_size^2).
int main(int argc, char* argv[]){
    // Fix: the original indexed argv[1] without checking argc.
    if(argc < 2){
        cout<<"usage: "<<argv[0]<<" <block_size>"<<endl;
        exit(1);
    }
    int block_size = atoi(argv[1]);
    // The grid is computed as M / block_size with no remainder handling,
    // so block_size must divide M for the whole image to be processed.
    if(block_size <= 0 || M % block_size != 0){
        cout<<"block_size must be a positive divisor of "<<M<<endl;
        exit(1);
    }
    float h_filter[W][W]; //Gaussian filter
    float h_x[w][w] = {{-1.0,0.0,1.0},{-2.0,0.0,2.0},{-1.0,0.0,1.0}}; // Sobel operator
    float h_y[w][w] = {{-1.0,-2.0,-1.0},{0.0,0.0,0.0},{1.0,2.0,1.0}}; //Sobel operator
    double sigma = 1.5;
    float P = 1.0/(2* M_PI * sigma*sigma);
    float Q = 2.0* M_PI * sigma*sigma;
    float sum = 0.0;
    long image_size;
    size_t elements;
    int L = (W-1)/2;
    /*initializing gaussian filter*/
    for(int x = -W/2; x <= W/2; x++){
        for(int y = -W/2; y <= W/2; y++){
            int I = (x+ W/2) - L;
            int J = (y+ W/2) - L;
            h_filter[x + W/2][y + W/2] = P*(exp(-(I*I + J*J)/Q));
            sum += h_filter[x + W/2][y + W/2];
        }
    }
    // Normalize so the filter weights sum to 1.
    for(int i = 0; i < W; i++){
        for(int j = 0; j < W; j++){
            h_filter[i][j]/= sum;
        }
    }
    // verify gaussian filter
    cout<<"guassian filter" <<endl;
    for(int q = 0; q < 15; q++){
        for(int z = 0; z <15; z++){
            cout<<h_filter[q][z]<<" ";
        }
        cout<<endl;
    }
    cout<<" "<<endl;
    FILE* fp_in, *fp_out1, *fp_out2;
    fp_in = fopen ("Rainier2048_noise.bin","rb");
    if(fp_in == NULL){cout<<"FILE ERROR!"<<endl;
        exit(1); }
    //obtain file size
    fseek(fp_in, 0, SEEK_END);
    image_size = ftell(fp_in);
    rewind(fp_in);
    // allocate buffer of image size
    unsigned char* buffer = (unsigned char*)malloc(sizeof(unsigned char) * image_size);
    unsigned char* buffer1 = (unsigned char*)malloc(sizeof(unsigned char) * image_size);
    //copy file into buffer
    elements = fread(buffer, sizeof(unsigned char), image_size, fp_in);
    if(elements != (size_t)image_size){cout<<"READ ERROR! "<<endl;
        exit(2);}
    fclose(fp_in);
    float* fptr = (float*)malloc(sizeof(float)* M * M);
    //typecast from char to float
    for(int row = 0; row < M; row++){
        for(int col = 0; col < M; col++){
            fptr[row * M + col] = (float) buffer[row * M + col];
        }
    }
    cout<<"raw image" <<endl;
    for(int q = 1024; q < 1034; q++){
        for(int z = 1525; z <1535; z++){
            // Fix: streaming an unsigned char prints the raw character;
            // cast to int so the pixel value is shown numerically.
            cout<<(int)buffer[q * M + z]<<" ";
        }
        cout<<endl;
    }
    cout<<"raw image of float type" <<endl;
    for(int q = 1024; q < 1034; q++){
        for(int z = 1525; z <1535; z++){
            cout<<fptr[q * M + z]<<" ";
        }
        cout<<endl;
    }
    cout<<" "<<endl;
    float* smooth_image = (float*)malloc(sizeof(float)* M * M);
    float* edged_image = (float*)malloc(sizeof(float)* M * M);
    float* d_gx;
    float* d_gy;
    float* d_hx;
    float* d_hy;
    float* d_raw_image;
    float* d_filter;
    float* d_smooth_image;
    float* d_edged_image;
    cudaMalloc((void**)&d_hx,sizeof(float)* w * w);
    cudaMalloc((void**)&d_hy,sizeof(float)* w * w);
    cudaMalloc((void**)&d_filter,sizeof(float)* W * W);
    cudaMalloc((void**)&d_raw_image,sizeof(float)* M * M);
    cudaMalloc((void**)&d_smooth_image,sizeof(float)* M * M);
    cudaMalloc((void**)&d_edged_image,sizeof(float)* M * M);
    cudaMalloc((void**)&d_gx,sizeof(float)* M * M);
    cudaMalloc((void**)&d_gy,sizeof(float)* M * M);
    /* measuring execution time */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /*copy image and filters from host to device */
    cudaMemcpy(d_raw_image, fptr, sizeof(float) * M * M, cudaMemcpyHostToDevice);
    cudaMemcpy(d_filter,h_filter , sizeof(float) * W * W, cudaMemcpyHostToDevice);
    cudaMemcpy(d_hx, h_x , sizeof(float) * w * w, cudaMemcpyHostToDevice);
    cudaMemcpy(d_hy, h_y , sizeof(float) * w * w, cudaMemcpyHostToDevice);
    /*define block size and grid size and invoke kernel*/
    dim3 threadsPerBlock(block_size, block_size);
    int numblocks = M / block_size;
    dim3 blocksPerGrid(numblocks, numblocks);
    cudaEventRecord(start);
    smoothening_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_filter,d_raw_image,d_hx,d_hy,d_gx,d_gy,d_smooth_image,d_edged_image,block_size);
    /* copy results from device to host (blocking copies also synchronize
       with the kernel, so the stop event records after the work is done) */
    cudaMemcpy(smooth_image, d_smooth_image, sizeof(float) * M * M, cudaMemcpyDeviceToHost);
    cudaMemcpy(edged_image, d_edged_image, sizeof(float) * M * M, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0.0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Fix: cudaEventElapsedTime reports milliseconds, and the message
    // previously said "secs" (and dropped the leading 'T' of "The").
    cout<<"The parallel execution time for block size "<< block_size << " is "<< milliseconds <<" ms" << endl;
    /* write edge detected image to file*/
    for(int row = 0; row < M; row++){
        for(int col = 0; col < M; col++){
            buffer[row * M + col] = (unsigned char) smooth_image[row * M + col];
            buffer1[row * M + col] = (unsigned char) edged_image[row * M + col];
        }
    }
    cout<<"smoothened_image buffered"<<endl;
    for(int ir = 1024; ir < 1034; ir++){
        for(int ic = 1525; ic < 1535; ic++){
            cout<< (int)*(buffer + ir * M + ic) <<" ";
        }
        cout<<endl;
    }
    cout<<" "<<endl;
    fp_out1 = fopen("smoothened_image_cuda.bin", "wb");
    fwrite(buffer, sizeof(unsigned char), image_size, fp_out1);
    fclose(fp_out1);
    fp_out2 = fopen("Edge_detected_image_cuda.bin", "wb");
    fwrite(buffer1,sizeof(unsigned char), image_size, fp_out2);
    fclose(fp_out2);
    cout<<"smoothened image" <<endl;
    for(int q = 1024; q < 1034; q++){
        for(int z = 1525; z <1535; z++){
            cout<<smooth_image[q * M + z]<<" ";
        }
        cout<<endl;
    }
    cout<<" "<<endl;
    cout<<"edged_image buffered"<<endl;
    for(int ir = 1024; ir < 1034; ir++){
        for(int ic = 1525; ic < 1535; ic++){
            cout<< (int)*(buffer1 + ir * M + ic) <<" ";
        }
        cout<<endl;
    }
    cout<<" "<<endl;
    cout<<"edged_image" <<endl;
    for(int q = 1024; q < 1034; q++){
        for(int z = 1525; z <1535; z++){
            cout<<edged_image[q * M + z]<<" ";
        }
        cout<<endl;
    }
    cout<<" "<<endl;
    /* free device memory*/
    cudaFree(d_raw_image);
    cudaFree(d_hx);
    cudaFree(d_hy);
    cudaFree(d_smooth_image);
    cudaFree(d_edged_image);
    cudaFree(d_gx);
    cudaFree(d_gy);
    cudaFree(d_filter);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* free host memory — fix: these buffers came from malloc, so they must
       be released with free(); the original used delete[], which is
       undefined behavior for malloc'd storage. */
    free(fptr);
    free(smooth_image);
    free(buffer);
    free(buffer1);
    free(edged_image);
    return 0;
}
|
558
|
#include "includes.h"
// Normalize the inverse-FFT velocity components (vx, vy) by 1/(dx*dy) and
// pack them back into the pitched float2 field v. Each thread owns one x
// column and lb consecutive rows starting at its base row.
__global__ void updateVelocity_k(float2 *v, float *vx, float *vy, int dx, int pdx, int dy, int lb, size_t pitch) {
    const int xi = blockIdx.x * blockDim.x + threadIdx.x;
    const int yBase = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
    // Scale factor that undoes the un-normalized inverse FFT.
    const float scale = 1.f / (dx * dy);
    // Skip threads that fall outside the domain in X.
    if (xi >= dx)
        return;
    for (int step = 0; step < lb; step++) {
        const int yi = yBase + step;   // domain row for this iteration
        if (yi >= dy)
            continue;                  // outside the domain in Y
        const int flat = yi * pdx + xi;
        float2 scaled;
        scaled.x = vx[flat] * scale;
        scaled.y = vy[flat] * scale;
        // v rows are pitched; step by bytes to reach row yi, then index x.
        float2 *row = (float2 *)((char *)v + yi * pitch);
        row[xi] = scaled;
    }
}
|
559
|
#include "includes.h"
// Forward elimination (Gaussian elimination without pivoting) performed
// independently by each thread on its own nComp x nComp system: thread t's
// matrix lives at fullMatrix + t*nComp*nComp, and its right-hand side is
// indexed at B + t*nComp*nComp (note: B uses the same nComp^2 per-thread
// stride as the matrix).
__global__ void kernel_forwardElimination( float * fullMatrix, float * B, unsigned int nComp ) {
    const unsigned int t = threadIdx.x;
    const unsigned int base = t * nComp * nComp;
    for (unsigned int pivot = 0; pivot < nComp - 1; pivot++) {
        for (unsigned int row = pivot + 1; row < nComp; row++) {
            // Elimination factor, accumulated in double as in the original.
            double factor = fullMatrix[base + row * nComp + pivot]
                          / fullMatrix[base + pivot * nComp + pivot];
            for (unsigned int col = 0; col < nComp; col++)
                fullMatrix[base + row * nComp + col] -= factor * fullMatrix[base + pivot * nComp + col];
            B[base + row] -= factor * B[base + pivot];
        }
    }
    __syncthreads();
}
|
560
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Benchmark: time num_mallocs cudaMalloc/cudaFree round trips of
// malloc_size_bytes each and print the elapsed wall-clock seconds.
// argv[1]: allocation size in bytes; argv[2]: number of allocations.
int main(int argc, char** argv) {
    printf("Star timer\n");
    // Start the timer
    struct timeval tim;
    gettimeofday(&tim, NULL);
    double t1 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    // Fix: the original printed the usage string with a %s but no argument
    // (undefined behavior), kept running after printing it, and used
    // malloc_size_bytes/num_mallocs uninitialized when argc == 2.
    if (argc < 3) {
        printf("usage: %s <int malloc_size_bytes> <int number_mallocs>\n", argv[0]);
        return 1;
    }
    int malloc_size_bytes = atoi(argv[1]);
    int num_mallocs = atoi(argv[2]);
    // Device pointer reused by every iteration.
    int *dev_a;
    int size = malloc_size_bytes;
    // Allocate and immediately release num_mallocs device buffers.
    for (int i = 0; i < num_mallocs; i++) {
        cudaMalloc((void **)&dev_a, size);
        cudaFree(dev_a);
    }
    // Print timing information
    gettimeofday(&tim, NULL);
    double t2 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    printf("%.6lf\t", (t2 - t1)); // 1000000000 = 10^9, 1000000 = 10^6
    printf("END TIMER!\n");
    return 0;
}
|
561
|
#include <iostream>
#include <vector>
#include <ctime>
#include <cstdlib>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
using namespace std;
// Sortable record: ordering is defined solely by the double field 'nana';
// 'key' and 'value' are payload carried along by the sort.
struct element
{
    int key;
    float value;
    double nana;   // sort key
    // Strict-weak ordering by nana. The comparand is taken by const
    // reference (the original copied the whole struct on every comparison);
    // call sites are unaffected.
    __host__ __device__
    bool operator<(const element &other) const
    {
        return nana < other.nana;
    }
};
//Fill a vector with random numbers in the range [lower, upper]
void rnd_fill(thrust::host_vector<element> &V, const double lower, const double upper, int seed) {
//Create a unique seed for the random number generator
srand(time(NULL));
size_t elem = V.size();
for( size_t i = 0; i < elem; ++i){
V[i].nana = (double) rand() / (double) RAND_MAX;
}
}
// Parse str into num using stream extraction; on failure num is set to 0.
template <typename T>
inline void str2num(std::string str, T& num){
    std::istringstream parser(str);
    if (!(parser >> num)) {
        num = 0;
    }
}
// Experiment driver: for sizes 2^0 .. 2^(numelem-1), sorts random element
// vectors on the GPU with thrust::sort, numexp repetitions each, and
// accumulates coarse (1-second-resolution time_t) timings.
// argv[1]: numelem (number of size doublings); argv[2]: numexp (repeats).
// NOTE(review): argv[1]/argv[2] are read without an argc check, and the
// sorted values are printed inside the timed region, which inflates the
// GPU timing — confirm whether that is intentional.
int main( int argc, char** argv ){
int numelem;
string param = argv[1]; str2num( param, numelem);
int numexp;
param = argv[2]; str2num( param, numexp);
ofstream result("experiment.data");
// timer[0]: accumulated GPU seconds; timer[5]: accumulated CPU seconds;
// [1]/[2] and [3]/[4] are per-run start/stop stamps.
vector<time_t> timer(6);
timer[0] = 0;
timer[5] = 0;
for(int i = 0; i < numelem; ++i){
int size = pow(2, i);
cout << "# elems: " << size << endl;
for( int j = 0; j < numexp; ++j){
//Initialization
thrust::host_vector<element> h_V;
thrust::device_vector<element> g_V;
h_V.resize( size );
int seed = time(0);
rnd_fill( h_V, 0.1, 1.0, seed);
g_V = h_V;
// CPU-side copy for the (currently commented-out) std::sort comparison.
// Note: this inner 'i' shadows the outer size-loop variable.
vector<element> c_V;
c_V.resize( size );
for( int i = 0; i < h_V.size(); ++i){
c_V[i].nana = h_V[i].nana;
}
timer[1] = time(0);
// Device sort keyed on element::operator< (the 'nana' field).
thrust::sort( g_V.begin(), g_V.end() );
h_V = g_V;
for( int k = 0; k < g_V.size(); k++){
cout << h_V[k].nana << "\t";
}
timer[2] = time(0);
// result << size << "," << difftime(timer[2], timer[1]) << endl;
timer[0] += difftime(timer[2], timer[1]);
timer[3] = time(0);
// sort( c_V.begin(), c_V.end());
timer[4] = time(0);
// result << size << "," << difftime(timer[4], timer[3]) << endl;
timer[5] += difftime(timer[4], timer[3]);
}
}
cout << "GPU SORTING\n";
cout <<"Tiempo total: " << timer[0] << endl;
cout << "CPU SORTING\n";
cout <<"Tiempo total: " << timer[5] << endl;
result.close();
return 0;
}
|
562
|
#include "includes.h"
// Device helper: adds 5 to *a, but only for threads whose global index
// falls inside the first SIZE elements.
__device__ void timeTest1(int *a){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid < SIZE) {
        *a += 5;
    }
}
// Timing micro-benchmark kernel: each in-range thread calls the device
// helper ten million times on a thread-local accumulator.
// NOTE(review): 'a' is local and never read afterwards, so the compiler is
// free to optimize the whole loop away — verify with the SASS/PTX that the
// benchmark actually measures what is intended.
__global__ void timeTest() {
int t_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (t_index < SIZE) {
int a = 0;
for(int i = 0; i < 10000000; i++){
timeTest1(&a);
}
}
}
|
563
|
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
#include <iostream>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
// Scales x element-wise into y (y[i] = a * x[i]); one thread per element,
// indexed by threadIdx.x only (single-block launches).
// NOTE(review): despite the name, no additive y term is applied.
template<typename T>
__global__ void axpy(T a, T *x, T *y) {
y[threadIdx.x] = a * x[threadIdx.x];
}
// Empty templated kernel: exists solely as a hipify launch-syntax fixture.
template<typename T>
__global__ void axpy_empty() {
}
// Empty non-templated kernel used as a hipify launch-syntax fixture.
__global__ void empty() {
}
// No-op kernel with scalar parameters; hipify fixture for argument passing.
__global__ void nonempty(int x, int y, int z) {
}
// hipify lit-test driver: exercises every combination of kernel-launch
// syntax (scalar vs dim3 grid/block, with/without shared memory and
// stream) so the CHECK lines can verify the hipLaunchKernelGGL rewrites.
// The CHECK comments are test expectations — the code must not change.
int main(int argc, char* argv[]) {
const int kDataLen = 4;
float a = 2.0f;
float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
float host_y[kDataLen];
// Copy input data to device.
float* device_x;
float* device_y;
// CHECK: hipMalloc(&device_x, kDataLen * sizeof(float));
cudaMalloc(&device_x, kDataLen * sizeof(float));
// CHECK: hipMalloc(&device_y, kDataLen * sizeof(float));
cudaMalloc(&device_y, kDataLen * sizeof(float));
// CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);
cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice);
int x = 1, y = 2, z = 3;
size_t N = 32;
// CHECK: hipStream_t stream = NULL;
cudaStream_t stream = NULL;
// CHECK: hipStreamCreate(&stream);
cudaStreamCreate(&stream);
// Templated kernel with arguments, all launch-config spellings.
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
axpy<float><<<1, kDataLen>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
axpy<float><<<dim3(1), kDataLen>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
axpy<float><<<1, dim3(kDataLen)>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
axpy<float><<<dim3(1), dim3(kDataLen)>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y);
axpy<float><<<1, kDataLen, N>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y);
axpy<float><<<dim3(1), kDataLen, N>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y);
axpy<float><<<1, dim3(kDataLen), N>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y);
axpy<float><<<dim3(1), dim3(kDataLen), N>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y);
axpy<float><<<1, kDataLen, N, stream>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y);
axpy<float><<<dim3(1), kDataLen, N, stream>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y);
axpy<float><<<1, dim3(kDataLen), N, stream>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y);
axpy<float><<<dim3(1), dim3(kDataLen), N, stream>>>(a, device_x, device_y);
// Templated kernel without arguments.
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0);
axpy_empty<float><<<1, kDataLen>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0);
axpy_empty<float><<<dim3(1), kDataLen>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0);
axpy_empty<float><<<1, dim3(kDataLen)>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0);
axpy_empty<float><<<dim3(1), dim3(kDataLen)>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0);
axpy_empty<float><<<1, kDataLen, N>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0);
axpy_empty<float><<<dim3(1), kDataLen, N>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0);
axpy_empty<float><<<1, dim3(kDataLen), N>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0);
axpy_empty<float><<<dim3(1), dim3(kDataLen), N>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream);
axpy_empty<float><<<1, kDataLen, N, stream>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream);
axpy_empty<float><<<dim3(1), kDataLen, N, stream>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream);
axpy_empty<float><<<1, dim3(kDataLen), N, stream>>>();
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream);
axpy_empty<float><<<dim3(1), dim3(kDataLen), N, stream>>>();
// Non-templated kernel without arguments.
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0);
empty<<<1, kDataLen>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0);
empty<<<dim3(1), kDataLen>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0);
empty<<<1, dim3(kDataLen)>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0);
empty<<<dim3(1), dim3(kDataLen)>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0);
empty<<<1, kDataLen, N>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0);
empty<<<dim3(1), kDataLen, N>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0);
empty<<<1, dim3(kDataLen), N>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0);
empty<<<dim3(1), dim3(kDataLen), N>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream);
empty<<<1, kDataLen, N, stream>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream);
empty<<<dim3(1), kDataLen, N, stream>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream);
empty<<<1, dim3(kDataLen), N, stream>>> ( );
// CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream);
empty<<<dim3(1), dim3(kDataLen), N, stream>>> ( );
// Non-templated kernel with scalar arguments.
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z);
nonempty<<<1, kDataLen>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z);
nonempty<<<dim3(1), kDataLen>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z);
nonempty<<<1, dim3(kDataLen)>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z);
nonempty<<<dim3(1), dim3(kDataLen)>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z);
nonempty<<<1, kDataLen, N>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z);
nonempty<<<dim3(1), kDataLen, N>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z);
nonempty<<<1, dim3(kDataLen), N>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z);
nonempty<<<dim3(1), dim3(kDataLen), N>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z);
nonempty<<<1, kDataLen, N, stream>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z);
nonempty<<<dim3(1), kDataLen, N, stream>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z);
nonempty<<<1, dim3(kDataLen), N, stream>>> (x, y, z);
// CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z);
nonempty<<<dim3(1), dim3(kDataLen), N, stream>>> (x, y, z);
// Copy output data to host.
// CHECK: hipDeviceSynchronize();
cudaDeviceSynchronize();
// CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);
cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_y[i] << "\n";
}
// CHECK: hipDeviceReset();
cudaDeviceReset();
return 0;
}
|
564
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include<unistd.h>
#define DEVICE_ID 0
/* constants for the number of threads and the integration domain */
/* number of threads in a block, 2^n */
#define NT 1024
/* number of blocks in a grid, 2^n */
#define NB 16
/* length of the target domain */
#define L 10.0
/* number of division for the discretization of the target domain, 2^n */
#define N 1024
/* constants on a GPU */
__device__ __constant__ int n;
__device__ __constant__ double l;
//host functions-----------------------------------------------
// CPU reference: Riemann sum of exp(-x^2 - y^2) over the square
// [-l/2, l/2) x [-l/2, l/2), sampled on an n x n grid of cell width l/n.
// Returns the sum scaled by the cell area (an approximation of the
// double integral).
double calculate_reference(int n, double l) {
    const double d = l / (double)n;
    double total = 0.0;
    for (int row = 0; row < n; row += 1) {
        const double x = d * ((double)row - (double)n / 2.0);
        for (int col = 0; col < n; col += 1) {
            const double y = d * ((double)col - (double)n / 2.0);
            total += exp(- x * x - y * y);
        }
    }
    return total * d * d;
}
//device functions---------------------------------------------
// Evaluate exp(-x^2 - y^2) at every point of the n x n grid, one value per
// array slot, grid-striding over the NT*NB launched threads. Reads the
// __constant__ symbols n and l set up by the host.
__global__ void calculate_each_point(double *device_double) {
    const int first = threadIdx.x + blockIdx.x * blockDim.x;
    const double step = l / (double)n;   // cell width, identical per thread
    for (int idx = first; idx < n * n; idx += NT * NB) {
        const double px = step * ((double)(idx % n) - (double)n / 2.0);
        const double py = step * ((double)(idx / n) - (double)n / 2.0);
        device_double[idx] = exp(- px * px - py * py);
    }
}
//-------------------------------------------------------------
// One halving step of a pairwise sum over device_double[0 .. dim_array-1]:
// writes ceil(dim_array/2) partial sums into device_double_reduced, which
// matches the host loop's next size of k/2 + k%2.
// Fix: the original only produced floor(dim_array/2) outputs, so for odd
// dim_array the middle input element was dropped and the last output slot
// read uninitialized on the next pass. (Even sizes behave exactly as before.)
__global__ void reduce_array_global_memory(double *device_double, double *device_double_reduced, int dim_array) {
    int i;
    int global_id = threadIdx.x + blockIdx.x * blockDim.x;
    int half = dim_array / 2 + dim_array % 2; /* ceil(dim_array / 2) */
    for(i = global_id; i < dim_array; i += NT * NB) {
        if(i < half) {
            device_double_reduced[i] = (i + half < dim_array)
                ? device_double[i] + device_double[i + half]
                : device_double[i]; /* odd size: middle element passes through */
        }
    }
}
//-------------------------------------------------------------
// Block-wise shared-memory tree reduction: each NT-thread block loads a
// tile into shared memory, halves it log2(NT) times, and thread 0 writes
// the block's partial sum to device_double_reduced[block_id], striding
// block_id by NB per outer pass.
// NOTE(review): the __syncthreads() calls sit inside a grid-stride loop
// whose trip count differs per thread whenever dim_array is not a multiple
// of NB*NT — threads that exit the loop early skip the barrier, which is
// undefined behavior. Safe only for the power-of-two sizes used here;
// confirm before reusing with arbitrary dim_array.
__global__ void reduce_array_shared_memory(double *device_double, double *device_double_reduced, int dim_array) {
__shared__ double device_shared_double[NT];
int global_id = threadIdx.x + blockIdx.x * blockDim.x;
int block_id = blockIdx.x;
int local_id = threadIdx.x;
int i;
int j;
for(i = global_id; i < dim_array; i += NB * NT) {
// Stage this block's tile in shared memory before reducing it.
device_shared_double[local_id] = device_double[i];
__syncthreads();
// Tree reduction: halve the active range each step.
for(j = NT / 2; j > 0; j = j / 2) {
if((local_id < j) && (local_id + j < dim_array)) {
device_shared_double[local_id] += device_shared_double[local_id + j];
}
__syncthreads();
}
// One partial sum per tile, indexed by the strided block id.
if(local_id == 0) {
device_double_reduced[block_id] = device_shared_double[0];
}
__syncthreads();
block_id += NB;
}
}
//main---------------------------------------------------------
// Integrates exp(-x^2 - y^2) over an L x L square three ways — GPU with a
// global-memory reduction, GPU with a shared-memory reduction, and a pure
// CPU reference — printing each result for comparison.
int main(void) {
    int host_n = N;
    double *device_double[2];   // ping-pong buffers for the reductions
    double host_l = L;
    double host_sum;
    double *host_double;//DBG
    int i;
    int i_temp;
    int j;
    int k;
    //initialize---------------------------------------------------
    cudaSetDevice(DEVICE_ID);
    // Fix: 'n' is a 4-byte int __constant__; the original copied
    // sizeof(double) (8 bytes) into it, overrunning the symbol.
    cudaMemcpyToSymbol(n, &host_n, sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(l, &host_l, sizeof(double), 0, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&device_double[0], N * N * sizeof(double));
    cudaMalloc((void **)&device_double[1], N * N * sizeof(double));
    host_double = (double *)calloc(N * N, sizeof(double));
    //tasks--------------------------------------------------------
    //calculate on gpu w/o shared memory-------------------
    calculate_each_point<<<NB, NT>>>(device_double[0]);
    cudaDeviceSynchronize();
    // Ping-pong between the two buffers, halving the element count
    // (k/2 + k%2 = ceil(k/2)) until a single sum remains in buffer i.
    i = 0;
    j = 1;
    for(k = N * N; k > 1; k = k / 2 + k % 2) {
        reduce_array_global_memory<<<NB, NT>>>(device_double[i], device_double[j], k);
        cudaDeviceSynchronize();
        i_temp = i;
        i = j;
        j = i_temp;
    }
    cudaMemcpy(&host_sum, device_double[i], sizeof(double), cudaMemcpyDeviceToHost);
    // Scale the raw sum by the grid-cell area (l/n)^2.
    host_sum = host_sum * host_l * host_l / (double)host_n / (double)host_n;
    printf("calc on gpu w/o shared mem:%f\n", host_sum);
    //calculate on gpu with shared memory------------------
    calculate_each_point<<<NB, NT>>>(device_double[0]);
    cudaDeviceSynchronize();
    i = 0;
    j = 1;
    // Each shared-memory pass shrinks the array by a factor of NT.
    for(k = N * N; k > 1; k = k / NT) {
        reduce_array_shared_memory<<<NB, NT>>>(device_double[i], device_double[j], k);
        cudaDeviceSynchronize();
        i_temp = i;
        i = j;
        j = i_temp;
    }
    cudaMemcpy(&host_sum, device_double[i], sizeof(double), cudaMemcpyDeviceToHost);
    host_sum = host_sum * host_l * host_l / (double)host_n / (double)host_n;
    printf("calc on gpu with shared mem:%f\n", host_sum);
    //calculate on cpu-------------------------------------
    host_sum = calculate_reference(host_n, host_l);
    printf("all done on cpu:%f\n", host_sum);
    //finalize-----------------------------------------------------
    cudaFree(device_double[0]);
    cudaFree(device_double[1]);
    cudaDeviceReset();
    free(host_double);
    //work in host-------------------------------------------------
    return 0;
}
|
565
|
// includes, system
#include <stdio.h>
#include <assert.h>
#include <chrono>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
// Exercise: round-trips a small float array host -> device -> device ->
// host, timing the pinned-memory H2D copy and verifying the data survives.
int main( int argc, char** argv)
{
cudaSetDevice(MYDEVICE);
// pointer and dimension for host memory
int n, dimA;
float *h_a;
// pointers for device memory
float *d_a, *d_b;
// allocate and initialize host memory
// Bonus: try using cudaMallocHost in place of malloc
// (pinned memory — enables faster, DMA-driven transfers)
dimA = 8;
//h_a = (float *) malloc(dimA*sizeof(float));
cudaMallocHost(&h_a, dimA*sizeof(float));
printf("Allocation done!\n");
for (n=0; n<dimA; n++)
{
h_a[n] = (float) n;
}
// Part 1 of 5: allocate device memory
size_t memSize = dimA*sizeof(float);
cudaMalloc(&d_a, memSize);
cudaMalloc(&d_b, memSize);
// Part 2 of 5: host to device memory copy (cudaMemcpy blocks, so the
// wall-clock interval below covers the whole transfer)
auto start = std::chrono::system_clock::now();
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> dur= end - start;
printf("Time spent in seconds: %f\n", dur.count());
// bytes / seconds, scaled to MB/s
auto bandwidth = (memSize/dur.count())/1000000;
printf("PCI Express Bandwidth in MB/s: %f\n", bandwidth);
// Part 3 of 5: device to device memory copy
cudaMemcpy(d_b, d_a, memSize, cudaMemcpyDeviceToDevice);
// clear host memory so the readback below can't pass by accident
for (n=0; n<dimA; n++)
{
h_a[n] = 0.f;
}
// Part 4 of 5: device to host copy
cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("cudaMemcpy calls");
// verify the data on the host is correct
for (n=0; n<dimA; n++)
{
assert(h_a[n] == (float) n);
}
// Part 5 of 5: free device memory pointers d_a and d_b
cudaFree(d_a);
cudaFree(d_b);
// Check for any CUDA errors
checkCUDAError("cudaFree");
// free host memory pointer h_a
//free(h_a);
// cudaMallocHost allocations must be released with cudaFree(Host).
cudaFree(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
// Abort the program with a descriptive message if any preceding CUDA call
// or kernel launch left an error behind (cudaGetLastError also clears it).
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
}
|
566
|
#include "includes.h"
// Element-wise in-place sum: arr1[i] += arr2[i] for every i below
// num_elements, one thread per element.
__global__ void AddIntegers(int *arr1, int *arr2, int num_elements)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= num_elements)
        return;   // tail threads past the array do nothing
    arr1[gid] += arr2[gid];
}
|
567
|
// Simple starting example for CUDA program
// Kees Lemmens, last change May 2012
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define NRBLKS 4 // Nr of blocks in a kernel (gridDim)
#define NRTPBK 4 // Nr of threads in a block (blockDim)
// Abort with the supplied message if the most recent CUDA call failed.
// Fix: the parameter is now const char* — every call site passes a string
// literal, and binding a literal to char* is deprecated in C++ (ill-formed
// since C++11). Call syntax is unchanged.
void checkCudaError(const char *error)
{
   if (cudaGetLastError() != cudaSuccess)
   {
      fprintf (stderr, "Cuda : %s\n",error);
      exit(EXIT_FAILURE);
   }
}
// One thread per character: XOR each byte with the fixed key to decode the
// message in place.
__global__ void decodeOnGPU(char *string)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    const char key = (char)011; // 000001001 = 011 in octal
    string[idx] ^= key;
}
// Decode a fixed XOR-encrypted 16-character message on the GPU and print it.
// (The trailing '\11' byte decodes to '\0', terminating the result.)
int main(void)
{
   // Fix: string literals bind to const char*; the original used char*,
   // which is deprecated / ill-formed in modern C++.
   const char *encryption = "Aleef)^f{em)(((\11";
   char *string_h;            // pointer to host memory
   char *string_d;            // pointer to device memory
   int len = NRTPBK * NRBLKS; // 16 characters, one per GPU thread
   // allocate memory on host and device
   string_h = (char *)malloc(len);
   if (string_h == NULL)
   {
      fprintf(stderr, "Malloc failed on host !\n");
      exit(EXIT_FAILURE);
   }
   cudaMalloc((void **) &string_d, len);
   checkCudaError("Malloc failed on GPU device !");
   // copy encrypted string to device memory
   cudaMemcpy(string_d, encryption, sizeof(char) * len, cudaMemcpyHostToDevice);
   checkCudaError("Sending data to GPU device failed !");
   // Start the decoding GPU kernel on NRTPBK threads and NRBLKS blocks
   decodeOnGPU <<< NRBLKS, NRTPBK >>> (string_d);
   checkCudaError("Kernel failed on GPU device !");
   // retrieve data from GPU device: string_d to string_h
   cudaMemcpy(string_h, string_d, len, cudaMemcpyDeviceToHost);
   checkCudaError("Receiving data from GPU device failed !");
   printf("%s \n", string_h);
   // cleanup memory
   cudaFree(string_d);
   free(string_h);
   return 0;
}
|
568
|
#include "k_indices.cuh"
namespace timemachine {

// Takes a source and destination array.
// The value of the src is used as the index and the value in the destination array. Allows combining
// a series of indices to get a unique set of values.
void __global__ k_unique_indices(
    const int N, // Number of values in src
    const int K, // Number of values in dest
    const unsigned int *__restrict__ src,
    unsigned int *__restrict__ dest) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N) {
        const unsigned int v = src[tid];
        // Only values that fit inside dest are recorded.
        if (v < K) {
            dest[v] = v;
        }
    }
}

// Any value that is >=N becomes the idx and any value that is an idx becomes N
void __global__ k_invert_indices(const int N, unsigned int *__restrict__ arr) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N) {
        arr[tid] = (arr[tid] >= N) ? tid : N;
    }
}

// Fill arr[i] = i + offset for i in [0, N) — unsigned overload.
void __global__ k_arange(const int N, unsigned int *__restrict__ arr, unsigned int offset) {
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot < N) {
        arr[slot] = slot + offset;
    }
}

// Fill arr[i] = i + offset for i in [0, N) — signed overload.
void __global__ k_arange(const int N, int *__restrict__ arr, int offset) {
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot < N) {
        arr[slot] = slot + offset;
    }
}
} // namespace timemachine
|
569
|
#include <stdio.h>
// Reverse a len-element array, one thread per element. Assumes a single
// block with blockDim.x == len; no bounds guard.
__global__ void array_reverse(int *array_a_dev, int *array_a_rev_dev, int len)
{
    const int src = threadIdx.x;
    const int dst = len - 1 - src;
    array_a_rev_dev[dst] = array_a_dev[src];
}
// Reverse via a statically sized shared-memory staging buffer (capacity 9
// — must be >= blockDim.x). Single block, one thread per element.
__global__ void array_reverse_shared(int *array_a_dev, int *array_a_rev_dev, int len)
{
    __shared__ int stage[9];
    const int t = threadIdx.x;
    stage[t] = array_a_dev[t];
    // All loads must land in shared memory before anyone writes out.
    __syncthreads();
    array_a_rev_dev[len - 1 - t] = stage[t];
}
// Reverse via dynamically sized shared memory: the buffer size is the
// third kernel-launch parameter (len * sizeof(int) in the host code).
__global__ void array_reverse_dynamic_shared(int *array_a_dev, int *array_a_rev_dev, int len)
{
    extern __shared__ int stage[];
    const int t = threadIdx.x;
    stage[t] = array_a_dev[t];
    // Barrier before the reversed write-back, as in the static version.
    __syncthreads();
    array_a_rev_dev[len - 1 - t] = stage[t];
}
// Demo driver: reverses a 9-element array on the CPU (for reference) and on
// the GPU (three kernel variants, selectable by commenting), then prints all
// three arrays.
int main()
{
    int len = 9;
    int *array_a_host;
    int *array_a_rev_host;
    int *array_result_back_to_host;
    // Host buffers: input, CPU-reversed reference, and the GPU result.
    array_a_host = (int *)malloc(len * sizeof(int));
    array_a_rev_host = (int *)malloc(len * sizeof(int));
    array_result_back_to_host = (int *)malloc(len * sizeof(int));
    for(int i = 0; i < len; i++)
    {
        array_a_host[i] = i * 2;
    }
    // CPU reference reversal.
    for(int i = 0; i < len; i++)
    {
        array_a_rev_host[len - i - 1] = array_a_host[i];
    }
    printf("\n-------------Array a-----------------\n");
    for(int i = 0; i < len; i++)
    {
        printf("%d ", *(array_a_host + i));
    }
    printf("\n");
    printf("\n-------------Array b-----------------\n");
    for(int i = 0; i < len; i++)
    {
        printf("%d ", *(array_a_rev_host + i));
    }
    printf("\n");
    // ------------------GPU--------------------------
    int *array_a_dev;
    int *array_a_rev_dev;
    cudaMalloc((void**) &array_a_dev, len * sizeof(int));
    cudaMalloc((void**) &array_a_rev_dev, len * sizeof(int));
    cudaMemcpy(array_a_dev, array_a_host, len * sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimGrid(1, 1, 1);
    dim3 dimBlock(9, 1, 1);   // one thread per element; must equal len
    // Version 1
    array_reverse<<<dimGrid, dimBlock>>>(array_a_dev, array_a_rev_dev, len);
    // Version 2
    // array_reverse_shared<<<dimGrid, dimBlock>>>(array_a_dev, array_a_rev_dev, len);
    // Version 3 (dynamic shared memory passed as the 3rd launch argument)
    // array_reverse_dynamic_shared<<<dimGrid, dimBlock, len*sizeof(int)>>>(array_a_dev, array_a_rev_dev, len);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(array_result_back_to_host, array_a_rev_dev, len * sizeof(int), cudaMemcpyDeviceToHost);
    printf("\n-------------Array cuda--------------\n");
    for(int i = 0; i < len; i++)
    {
        printf("%d ", *(array_result_back_to_host + i));
    }
    printf("\n");
    free(array_a_host);
    free(array_a_rev_host);
    free(array_result_back_to_host);
    cudaFree(array_a_dev);
    cudaFree(array_a_rev_dev);
    // Fix: the original returned 1, which conventionally signals failure;
    // return 0 on success.
    return 0;
}
|
570
|
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
__global__ void mikkola_gpu(const double *manom, const double *ecc, double *eanom){
/*
Vectorized C Analtyical Mikkola solver for the eccentric anomaly.
See: S. Mikkola. 1987. Celestial Mechanics, 40, 329-334.
Adapted from IDL routine keplereq.pro by Rob De Rosa http://www.lpl.arizona.edu/~bjackson/idl_code/keplereq.pro
Args:
manom (double[]): mean anomaly, must be between 0 and pi.
ecc (double[]): eccentricity
eanom0 (double[]): array for eccentric anomaly
Return:
None: eanom (double[]): is changed by reference
Written: Devin Cody, 2019
NOTE(review): there is no bounds guard on i, so the launch configuration
must cover exactly the array length (threads past the end would read/write
out of range) — confirm at call sites.
*/
// One thread per orbit element.
int i = threadIdx.x + blockIdx.x*blockDim.x;
double alpha, beta, aux, z, s0, s1, se0, ce0;
double f, f1, f2, f3, f4, u1, u2, u3;
// Cubic-equation starting guess (Mikkola 1987, eqs. for alpha/beta/z/s).
alpha = (1.0 - ecc[i]) / ((4.0 * ecc[i]) + 0.5);
beta = (0.5 * manom[i]) / ((4.0 * ecc[i]) + 0.5);
aux = sqrt(beta*beta + alpha*alpha*alpha);
z = pow(fabs(beta + aux), (1.0/3.0));
s0 = z - (alpha/z);
// Fifth-order correction to the cubic root.
s1 = s0 - (0.078*(pow(s0, 5))) / (1.0 + ecc[i]);
// Initial eccentric anomaly estimate.
eanom[i] = manom[i] + (ecc[i] * (3.0*s1 - 4.0*(s1*s1*s1)));
se0=sin(eanom[i]);
ce0=cos(eanom[i]);
// One step of fourth-order (Halley-like) Newton refinement of
// Kepler's equation f(E) = E - e*sin(E) - M using derivatives f1..f4.
f = eanom[i]-ecc[i]*se0-manom[i];
f1 = 1.0-ecc[i]*ce0;
f2 = ecc[i]*se0;
f3 = ecc[i]*ce0;
f4 = -f2;
u1 = -f/f1;
u2 = -f/(f1+0.5*f2*u1);
u3 = -f/(f1+0.5*f2*u2+(1.0/6.0)*f3*u2*u2);
eanom[i] += -f/(f1+0.5*f2*u3+(1.0/6.0)*f3*u3*u3+(1.0/24.0)*f4*(u3*u3*u3));
}
|
571
|
#include "3d-test.cuh"
#include<iostream>
#include<stdio.h>
__host__ __device__
// Double the first 8 entries of array in place; x is currently unused.
void scalingFunction(int array[],int x) {
    int i = 0;
    while (i < 8) {
        array[i] += array[i];
        ++i;
    }
}
__host__ __device__
// Placeholder: intended to distribute the pencil vector back into the
// (x, y) column of an output matrix; the actual store is still disabled,
// so this function currently has no observable effect.
void distributeFunction(int array[],int x,int y){
    for (int k = 0; k < 8; ++k) {
        // outputMatrix[x][y][k] = array[k];   (disabled pending design)
    }
}
__host__ __device__
// Debug helper: print the (i, j, k) coordinate triple.
// BUG FIX: the original format string requested four %d conversions but
// supplied only two arguments (i, j) — undefined behavior. The format now
// matches the argument list, and the unused local pencilComputation object
// (dead construction) was removed.
void tOPrint(int i,int j,int k) {
    printf("The value of i:%d, j:%d, k:%d", i, j, k);
}
__global__
// Debug kernel: each (x, y, z) thread prints the k-row of inputMatrix at its
// (x, y) coordinate. The pencil copy / scale / distribute pipeline is still
// commented out.
// NOTE(review): p1 is a fresh LOCAL pencilComputation, not the host object
// initialized in launcher(), so inputMatrix here is uninitialized and the
// printed values are garbage — presumably the matrix should be passed in or
// live in device memory; confirm intended design.
void pencilComputationFunction(){
pencilComputation p1;
int i = threadIdx.x ;//+ blockIdx.x * blockDim.x;
int j = threadIdx.y ; // + blockIdx.y * blockDim.y;
int z = threadIdx.z;
for(int k=0; k<8 ; k++) {
printf("The value of i: %d , j: %d ,z: %d, inputMatrix Value: %d\n",i,j,z,p1.inputMatrix[i][j][k]);//,p1.inputMatrix[i][j][k],p1.pencilVector[k]);
printf("**************************************\n");
// tOPrint(i,j,k);
// p1.pencilVector[k] = p1.inputMatrix[i][j][k];
// scalingFunction(p1.pencilVector,i);
// distributeFunction(p1.pencilVector,i,j);
}
// printf("**************************************\n");
}
// Host entry point: seed inputMatrix and launch the 2x2x2 debug kernel, then
// block until it finishes.
// NOTE(review): only the [0..1][0..1][0..1] corner is initialized here while
// the kernel iterates k<8 — confirm whether the full extent of inputMatrix
// (declared in 3d-test.cuh, not visible here) should be filled.
// NOTE(review): the kernel constructs its own local pencilComputation, so
// this initialization never reaches the device — verify data transfer intent.
void pencilComputation::launcher(){
for(int i=0; i<2; i++) {
for(int j=0; j<2; j++) {
for(int k=0; k<2; k++) {
inputMatrix[i][j][k] = 10;
}
}
}
// One block of 2x2x2 threads; one thread per initialized matrix cell.
dim3 grid(1,1,1);
dim3 block(2,2,2);
// #pragma acc enter data copyin(inputMatrix) copyout(outputMatrix)
pencilComputationFunction<<<grid,block>>>();
// pencilComputationFunction<<<1,1>>>();
cudaDeviceSynchronize();
}
|
572
|
// BGRA pixel (one byte per channel). Field order matches a little-endian
// 32-bit BGRA layout; this struct is shared with the extern "C" FindPixel
// kernel, so the member order and size must not change.
struct ProgramGPUColorRGB
{
// Default device-side constructor; members are left uninitialized.
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
// Insaniquarium_Deluxe_Bot.Program
// Auto-generated kernel stub: currently only increments one counter per
// thread in indices; the color-matching logic implied by the signature is
// not implemented here.
// NOTE(review): there is no bounds check against indicesLen0, so the launch
// must not exceed indicesLen0 threads per grid — confirm at the call site.
// NOTE(review): the leading __syncthreads() has no preceding shared-memory
// traffic to order; it appears to be generator boilerplate.
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
__syncthreads();
indices[(threadIdx.x)]++;
}
|
573
|
#include<iostream>
using namespace std;
// Single-thread kernel: store a + b through the device pointer c.
__global__ void add(int a,int b,int *c){
    const int sum = a + b;
    *c = sum;
}
// Minimal CUDA demo: compute 2+7 on the device and print the result.
// Improvement over the original: CUDA API results are checked so a failed
// allocation or copy no longer silently prints garbage.
int main()
{
    int c = 0;
    int *dev_c = nullptr;
    if (cudaMalloc(&dev_c, sizeof(int)) != cudaSuccess) {
        cout << "cudaMalloc failed" << endl;
        return 1;
    }
    add<<<1,1>>>(2,7,dev_c);
    // Blocking copy also synchronizes with the kernel on the default stream.
    if (cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
        cout << "cudaMemcpy failed" << endl;
        cudaFree(dev_c);
        return 1;
    }
    cout<<"2+7="<<c<<endl;
    cudaFree(dev_c);
    return 0;
}
|
574
|
#include "includes.h"
/*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the OPS distribution.
*
* Copyright (c) 2013, Mike Giles and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Mike Giles may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file
* @brief OPS mpi+cuda run-time support routines
* @author Gihan Mudalige, Istvan Reguly
* @details Implements the runtime support routines for the OPS mpi+cuda
* backend
*/
int halo_buffer_size = 0;
char *halo_buffer_d = NULL;
// Unpack a halo-exchange receive buffer (src) into a strided 3-D dataset
// (dest). One thread per grid point in the range [r*_s, r*_e) per axis;
// *_step is +1 or -1 and selects the iteration direction, so the guard below
// compares with < or > accordingly. type_size bytes per component, dim
// components per point; OPS_soa selects struct-of-arrays (components
// size_x*size_y*size_z apart) versus array-of-structs (contiguous) layout
// in dest. buf_strides_* are element strides of the packed buffer.
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
// Advance dest to this grid point's first byte (per-component stride
// differs between SoA and AoS layouts).
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
// (idx - r_s) * step is the non-negative offset into the packed buffer
// regardless of iteration direction.
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
// Copy each component; device-side memcpy handles arbitrary type_size.
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d * type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
}
|
575
|
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* This sample queries the properties of the CUDA devices present in the system. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Map a compute capability (major.minor) to the number of CUDA cores per SM.
// Falls back to the last known entry (with a warning) for unknown versions.
// Generalized: Volta (7.0/7.2), Turing (7.5) and Ampere (8.0/8.6) entries
// added so modern GPUs no longer hit the fallback path; all existing
// mappings are unchanged.
inline int _ConvertSMVer2Cores(int major, int minor)
{
    // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
    typedef struct
    {
        int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
        int Cores;
    } sSMtoCores;
    sSMtoCores nGpuArchCoresPerSM[] =
    {
        { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
        { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
        { 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
        { 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
        { 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
        { 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
        { 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
        { 0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class
        { 0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class
        { 0x60, 64 }, // Pascal Generation (SM 6.0) GP100 class
        { 0x61, 128}, // Pascal Generation (SM 6.1) GP10x class
        { 0x62, 128}, // Pascal Generation (SM 6.2) GP10x class
        { 0x70, 64 }, // Volta Generation (SM 7.0) GV100 class
        { 0x72, 64 }, // Volta Generation (SM 7.2) Xavier class
        { 0x75, 64 }, // Turing Generation (SM 7.5) TU10x class
        { 0x80, 64 }, // Ampere Generation (SM 8.0) GA100 class
        { 0x86, 128}, // Ampere Generation (SM 8.6) GA10x class
        { -1, -1 }    // sentinel
    };
    int index = 0;
    while (nGpuArchCoresPerSM[index].SM != -1)
    {
        if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
        {
            return nGpuArchCoresPerSM[index].Cores;
        }
        index++;
    }
    // If we don't find the values, we default use the previous one to run properly
    printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
    return nGpuArchCoresPerSM[index-1].Cores;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Enumerate every CUDA device and print its hardware properties; prints a
// message instead if no CUDA device is present.
int
main( int argc, char** argv)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
// Header line printed once, before the first device's details.
if (dev == 0) {
// 9999.9999 is the emulation-only sentinel compute capability.
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %zd bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" CUDA Cores/MP: %d\n",
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor));
printf(" Total CUDA Cores %d\n",
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %zd bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %zd bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %zd bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %zd bytes\n",
deviceProp.textureAlignment);
// clockRate / memoryClockRate are reported in kHz; 1e-6 converts to GHz.
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
printf(" Memory Clock rate: %.2f GHz\n",
deviceProp.memoryClockRate * 1e-6f);
printf(" Memory Bus Width: %d bits\n",
deviceProp.memoryBusWidth);
printf(" Number of asynchronous engines: %d\n",
deviceProp.asyncEngineCount);
printf(" It can execute multiple kernels concurrently: %s\n",
deviceProp.concurrentKernels ? "Yes" : "No");
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTEST PASSED\n");
}
|
576
|
#include <stdio.h>
#include <cuda_runtime.h>
#define num_threads 256
#define N (1<<20)
// Deterministically fill arr (length N): first half with digits 0..9,
// second half with their negated counterparts. Fixed seed makes runs
// reproducible.
void fillArray(float* arr)
{
    srand(42);
    int idx;
    for (idx = 0; idx < N/2; ++idx)
    {
        arr[idx] = rand() % 10;
    }
    // Negative values for the second half.
    for (; idx < N; ++idx)
    {
        arr[idx] = -1*(rand() % 10);
    }
}
// Print all N entries of arr, space separated, followed by a newline.
void printArray(float* arr)
{
    for (int idx = 0; idx < N; ++idx)
        printf("%f ", arr[idx]);
    printf("\n");
}
// Sequential sum of all N elements of arr_in; the reference result for the
// GPU reductions. (Despite the name this is a reduction, not a vector add.)
void seq_VecAdd(float* arr_in, float* ans)
{
    float acc = 0;
    for (int idx = 0; idx < N; ++idx)
    {
        acc += arr_in[idx];
    }
    *ans = acc;
}
// Block-level tree reduction in shared memory; thread 0 of each block folds
// the block's partial sum into *arr_out with one atomicAdd.
// Requires blockDim.x == num_threads (a power of two).
__global__ void share_VecAdd(float* arr_in, float* arr_out)
{
    __shared__ float sVec[num_threads];
    unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
    // Loading shared memory. BUG FIX: threads past the end of the input must
    // still zero their slot — the original left it uninitialized, so a
    // partial final block folded garbage into the reduction.
    sVec[threadIdx.x] = (i < N) ? arr_in[i] : 0.0f;
    __syncthreads();
    // Tree reduction in shared mem; barrier outside the divergent branch so
    // every thread reaches it.
    for(unsigned int j = blockDim.x/2; j > 0; j >>= 1)
    {
        if (threadIdx.x < j)
        {
            sVec[threadIdx.x] += sVec[threadIdx.x + j];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0)
    {
        atomicAdd(arr_out, sVec[0]);
    }
}
// Tree reduction performed destructively in GLOBAL memory (arr_in is
// overwritten), then one atomicAdd per block into *arr_out.
// NOTE(review): arr_in[i + j] can index past the end of the array for the
// final block when N is not a multiple of blockDim.x — confirm callers only
// launch with N divisible by the block size (true for N = 1<<20, 256).
// NOTE(review): __syncthreads() only orders threads within a block; blocks
// here touch disjoint blockDim-sized slices, so that appears sufficient,
// but there is no guard excluding i >= N.
__global__ void glob_VecAdd(float* arr_in, float* arr_out)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
for(unsigned int j = blockDim.x/2; j > 0; j = j/2)
{
if (threadIdx.x < j)
{
arr_in[i] += arr_in[i + j];
}
__syncthreads();
}
// Thread 0's slot now holds this block's partial sum.
if (threadIdx.x == 0)
{
atomicAdd(arr_out,arr_in[i]);
}
}
// Abort the process with a descriptive message (prefixed by msg) if any
// previously issued CUDA call has failed. Clears the sticky error state.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
// Benchmark driver: sum an N-element array on the CPU, then on the GPU with
// a shared-memory reduction and a global-memory reduction, comparing results
// and throughput against the CPU reference.
int main (void)
{
    printf("-------------------N = %d----------------\n", N);
    float ans;
    //Declare host array
    float* arr_h;
    float* out_h;
    //Declare device array
    float* arr_in_d;
    float* arr_out_d;
    //Set amount of memory required for arrays
    const int num_bytes = N*sizeof(float);
    //Allocate memory for host array
    arr_h = (float*)malloc(num_bytes);
    out_h = (float*)malloc(num_bytes);
    //Allocate memory to the device
    cudaMalloc((void**)&arr_in_d, num_bytes);
    cudaMalloc((void**)&arr_out_d, num_bytes);
    //Fill host arrays with random values
    fillArray(arr_h);
    //////Sequential////////
    ////Start Timing
    cudaEvent_t launch_begin, launch_end;
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);
    cudaEventRecord(launch_begin,0);
    //Sequential calculation for checking
    seq_VecAdd(arr_h, &ans);
    cudaEventRecord(launch_end,0);
    cudaEventSynchronize(launch_end);
    float seq_time = 0;
    cudaEventElapsedTime(&seq_time, launch_begin, launch_end);
    float cputp = 1e-9*N/(seq_time*1e-3); //divive by 10^9 for giga and times by 10^3 to get seconds
    printf("CPU: Run Time: %f ms\n", seq_time);
    printf("CPU: Speed Up: %fx\n", seq_time/seq_time);
    printf("CPU: Throughputs: %f GFLOP/s\n\n",cputp );
    //////////////GPU Shared///////////////////
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);
    int num_blocks = N/num_threads;
    if (N % num_threads)
        num_blocks++;
    // BUG FIX: cudaMalloc does not zero device memory, so the accumulator
    // must be cleared before the kernel atomicAdds into it; the original
    // only cleared it before the second (global) run.
    cudaMemset(arr_out_d, 0, num_bytes);
    cudaEventRecord(launch_begin,0);
    cudaMemcpy(arr_in_d, arr_h, num_bytes, cudaMemcpyHostToDevice);
    share_VecAdd<<<num_blocks,num_threads>>>(arr_in_d, arr_out_d );
    cudaEventRecord(launch_end,0);
    cudaEventSynchronize(launch_end);
    float share_time = 0;
    cudaEventElapsedTime(&share_time, launch_begin, launch_end);
    cudaMemcpy(out_h, arr_out_d, num_bytes, cudaMemcpyDeviceToHost);
    checkCUDAError("share_VecAdd");
    if (out_h[0] == ans)
    {
        printf("Number of threads = %d\n\n", num_threads);
        printf("GPU(Shared) Tile Size = %d: Run Time: %f ms\n", num_threads, share_time);
        printf("GPU(Shared) Tile Size = %d: Speed Up: %fx\n", num_threads, seq_time/share_time);
        printf("GPU(Shared) Tile Size = %d: Throughputs: %f GFLOP/s\n", num_threads, 1e-9*N/(share_time*1e-3));
        printf("GPU(Shared) Tile Size = %d: Ratio of Throughputs: %f\n\n", num_threads, 1e-9*N/(share_time*1e-3)/cputp);
    }
    else
    {
        printf("GPU Shared Failed : %f\n", out_h[0]);
    }
    /////////////GPU Global/////////////////
    cudaMemset(arr_out_d, 0, num_bytes);
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);
    // BUG FIX: the original re-applied "if (N % num_threads) num_blocks++"
    // here, launching one extra block whenever N was not a multiple of
    // num_threads; recompute the count from scratch instead.
    num_blocks = (N + num_threads - 1) / num_threads;
    cudaEventRecord(launch_begin,0);
    cudaMemcpy(arr_in_d, arr_h, num_bytes, cudaMemcpyHostToDevice);
    glob_VecAdd<<<num_blocks,num_threads>>>(arr_in_d, arr_out_d);
    cudaEventRecord(launch_end,0);
    cudaEventSynchronize(launch_end);
    float glob_time = 0;
    cudaEventElapsedTime(&glob_time, launch_begin, launch_end);
    cudaMemcpy(out_h, arr_out_d, num_bytes, cudaMemcpyDeviceToHost);
    checkCUDAError("glob_VecAdd");
    if (out_h[0] == ans)
    {
        printf("GPU(Global): Run Time: %f ms\n", glob_time);
        printf("GPU(Global): Speed Up: %fx\n", seq_time/glob_time);
        printf("GPU(Global): Throughputs: %f GFLOP/s\n", 1e-9*N/(glob_time*1e-3));
        printf("GPU(Global): Ratio of Throughputs: %f\n\n", 1e-9*N/(glob_time*1e-3)/cputp);
    }
    else
    {
        printf("GPU Global Failed : %f\n", out_h[0]);
    }
    //Free host array
    free(arr_h);
    free(out_h);
    //Free device arrays
    cudaFree(arr_in_d);
    cudaFree(arr_out_d);
    return 0;
}
|
577
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "config.cuh"
using namespace std;
/*
* Mapping function to be run for each input. The input must be read from memory
* and the the key/value output must be stored in memory at pairs. Multiple
* pairs may be stored at the next postiion in pairs, but the maximum number of
* key/value pairs stored must not exceed NUM_KEYS.
*/
/*
 * Map step: emit one key/value pair per input character. Every pair uses
 * key 0; the value is 1 iff the character is a word separator (space or
 * newline), so summing the values in the reducer counts words.
 */
__device__ void mapper(input_type *input, KeyValuePair *pairs)
{
    pairs->key = 0;
    const char ch = input->ch;
    //pairs->key = ch;
    // 1 for a separator character, 0 otherwise.
    pairs->value = (ch == ' ' || ch == '\n') ? 1 : 0;
}
/*
* Reducing function to be run for each set of key/value pairs that share the
* same key. len key/value pairs may be read from memory, and the output
* generated from these pairs must be stored at output in memory.
*/
/*
 * Reduce step: count how many of the len pairs were flagged as word
 * separators by the mapper and store the total in *output.
 */
__device__ void reducer(KeyValuePair *pairs, int len, output_type *output)
{
    int separators = 0;
    for (int idx = 0; idx < len; ++idx)
    {
        if (pairs[idx].value == 1)
            ++separators;
    }
    // The separator count is the word count reported for this key.
    *output = separators;
}
/*
* Main function that runs a map reduce job.
*/
/*
 * Driver: read test.txt into the input array, run the map-reduce word count
 * on the GPU, and print the result.
 * BUG FIXES vs. the original:
 *  - fgetc() result held in an int (EOF is -1; a plain char may be unsigned
 *    and never compare equal to EOF, looping forever);
 *  - fopen() failure is now detected instead of dereferencing NULL;
 *  - the read loop is bounded by NUM_INPUT instead of overrunning input;
 *  - removed the printf("%s", input) on an array of input_type structs,
 *    which was undefined behavior (not a NUL-terminated char string).
 */
int main(int argc, char const *argv[])
{
    printf("\n My first Program. I am here\n");
    // Allocate host memory
    size_t input_size = NUM_INPUT * sizeof(input_type);
    size_t output_size = NUM_OUTPUT * sizeof(output_type);
    input_type *input = (input_type *) malloc(input_size);
    output_type *output = (output_type *) malloc(output_size);
    //Reading an input file and copying it into an input array
    FILE *f = fopen("test.txt","rt");
    if (f == NULL)
    {
        fprintf(stderr, "Could not open test.txt\n");
        free(input);
        free(output);
        return 1;
    }
    int c;
    int i = 0;
    while ((c = fgetc(f)) != EOF && i < NUM_INPUT)
    {
        input[i].ch = (char)c;
        i++;
    }
    printf("\n Read %d characters\n", i);
    fclose(f);
    // Run the Map Reduce Job
    runMapReduce(input, output);
    // Iterate through the output array
    // NOTE(review): output_type is defined in config.cuh — confirm %d is the
    // right conversion for it.
    for (size_t i = 0; i < NUM_OUTPUT; i++)
    {
        printf("The total number of words in the file are: %d\n", output[i]);
    }
    // Free host memory
    free(input);
    free(output);
    return 0;
}
|
578
|
#include <stdio.h>
// For each guarded (row, col) thread: accumulate d_B entries and write
// sum + d_C[row] into d_A[row].
// NOTE(review): the access pattern d_B[row + col*i] is unusual for a
// matrix-vector product (no vector operand is read, and every col produces
// a different sum yet all write d_A[row], a write race between threads of
// the same row) — the intended math should be confirmed against the caller;
// a conventional y = B*x + c would read d_B[row*numElements + i] * x[i].
__global__ void matVectMult(float* d_B, float* d_C, float* d_A, int numElements){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.f;
if(row < numElements && col < numElements) {
for(int i=0; i<numElements; i++)
sum += d_B[row+col*i];
d_A[row] = sum + d_C[row];
}
}
// Host driver: allocate a 512x512 matrix B and vectors A, C; run the
// matVectMult kernel and copy the result back.
// BUG FIX: the original grid was dim3(ceil((n*n)/32.0), ceil(n/32.0), 1) —
// 8192 blocks in x where ceil(n/32) = 16 suffice to cover n columns with
// 32-wide blocks. The kernel's bounds guard hid the error, but 99.8% of the
// launched blocks did no work.
int main() {
    // size of matrix and vector
    int n = 512;
    size_t vectorSize = n*sizeof(float);
    size_t matrixSize = n*n*sizeof(float);
    float *h_vA;
    float *h_mB;
    float *h_vC;
    float *d_A;
    float *d_B;
    float *d_C;
    // allocate data in host
    h_vA = (float*) malloc( vectorSize );
    h_mB = (float*) malloc( matrixSize );
    h_vC = (float*) malloc( vectorSize );
    // matrix initialization
    for(int i=0; i<n; i++)
        h_vC[i] = i;
    for(int i=0; i<n*n; i++)
        h_mB[i] = i;
    // allocate data in device
    cudaMalloc((void**)&d_A, vectorSize);
    cudaMalloc((void**)&d_B, matrixSize);
    cudaMalloc((void**)&d_C, vectorSize);
    // copy inputs to device
    cudaMemcpy(d_C, h_vC, vectorSize ,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_mB, matrixSize ,cudaMemcpyHostToDevice);
    // launch kernel: 32x32 thread blocks tiling the n x n index space
    dim3 DimThreadsPerBlock(32,32,1);
    dim3 DimBlocks(ceil(n/32.0),ceil(n/32.0),1);
    matVectMult<<< DimBlocks, DimThreadsPerBlock>>>(d_B,d_C,d_A, n);
    // copy output to host (blocking copy synchronizes with the kernel)
    cudaMemcpy(h_vA, d_A, vectorSize,cudaMemcpyDeviceToHost);
    // freeing space
    free(h_vA);
    free(h_mB);
    free(h_vC);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
579
|
#include <iostream>
#include <chrono>
constexpr size_t kSize = 1000000;
// RAII wall-clock timer: starts on construction and prints the elapsed
// microseconds to stdout when it goes out of scope.
class Stopwatch {
 public:
  using TimePoint = decltype(std::chrono::high_resolution_clock::now());
  Stopwatch() : start(std::chrono::high_resolution_clock::now()) {}
  ~Stopwatch() {
    end = std::chrono::high_resolution_clock::now();
    const auto elapsed_us =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
    std::cout << elapsed_us << " us\n";
  }
 private:
  TimePoint start;
  TimePoint end;
};
// Baseline: three blocking host-to-device copies of the same source buffer,
// one per destination, all on the default stream.
void Transfer0(float* orig, float* target0, float* target1, float* target2) {
  float* destinations[3] = {target0, target1, target2};
  for (float* dst : destinations) {
    cudaMemcpy(dst, orig, kSize * sizeof(float), cudaMemcpyHostToDevice);
  }
};
// Streamed variant: issue the three copies on separate streams so they can
// overlap, then wait for them to finish before tearing the streams down.
// BUG FIX: the original destroyed the streams without synchronizing, so the
// caller's Stopwatch measured only enqueue time rather than the transfers —
// making the comparison against Transfer0 meaningless.
// NOTE(review): orig is pageable memory here; pinned allocation
// (cudaMallocHost) is required for these async copies to truly overlap.
void Transfer1(float* orig, float* target0, float* target1, float* target2) {
  cudaStream_t stream[3];
  float* targets[3] = {target0, target1, target2};
  for (int i = 0; i < 3; ++i) {
    cudaStreamCreate(&stream[i]);
  }
  for (int i = 0; i < 3; ++i) {
    cudaMemcpyAsync(targets[i], orig, kSize * sizeof(float), cudaMemcpyHostToDevice, stream[i]);
  }
  for (int i = 0; i < 3; ++i) {
    cudaStreamSynchronize(stream[i]);
    cudaStreamDestroy(stream[i]);
  }
};
// Compare synchronous (Transfer0) against streamed (Transfer1) H2D copies.
// BUG FIX: the original leaked `original` (new[] without delete[]).
int main() {
  float* original = new float[kSize];
  float* target0, *target1, *target2;
  cudaMalloc(&target0, sizeof(float) * kSize);
  cudaMalloc(&target1, sizeof(float) * kSize);
  cudaMalloc(&target2, sizeof(float) * kSize);
  {
    Stopwatch s;
    Transfer0(original, target0, target1, target2);
  }
  {
    Stopwatch s;
    Transfer1(original, target0, target1, target2);
  }
  cudaFree(target0);
  cudaFree(target1);
  cudaFree(target2);
  delete[] original;
  return 0;
}
|
580
|
#include <stdio.h>
#include <stdlib.h>
__global__ void
// One pass of a strided pairwise reduction operating on global memory:
// each in-range thread adds the element `stride` positions ahead (read from
// data_in) into data_out at its own index.
// NOTE(review): the host loop below calls this repeatedly with d_out and
// d_in as DIFFERENT buffers while the accumulation happens in data_out —
// after the first pass the partial sums live in data_out, yet subsequent
// passes keep reading stale data_in; confirm whether both arguments are
// meant to alias the same buffer.
global_reduction_kernel(float *data_out, float *data_in, int stride, int size)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
if (idx_x + stride < size) {
data_out[idx_x] += data_in[idx_x + stride];
}
}
// Host driver: launch one kernel pass per power-of-two stride (1, 2, 4, ...)
// until the stride reaches the array size. Grid size is the ceiling of
// size / n_threads.
void global_reduction(float *d_out, float *d_in, int n_threads, int size)
{
    const int n_blocks = (size + n_threads - 1) / n_threads;
    int stride = 1;
    while (stride < size) {
        global_reduction_kernel<<<n_blocks, n_threads>>>(d_out, d_in, stride, size);
        stride <<= 1;
    }
}
|
581
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#define N 10000000
#define MAX_ERR 1e-6
__global__
// Element-wise vector addition: out = a + b, one thread per element with a
// guard for the ragged final block.
void vector_add(float *out, float *a, float *b, int n) {
    const int idx = blockDim.x*blockIdx.x+threadIdx.x;
    if (idx >= n)
        return;
    out[idx] = a[idx] + b[idx];
}
// Driver: add two N-element vectors on the GPU and verify against the exact
// expected sum on the host.
// BUG FIX: the original never freed the host buffers a, b, out and fell off
// main without an explicit return.
int main(){
    float *a, *b, *out;
    // Allocate memory
    a   = (float*)malloc(sizeof(float) * N);
    b   = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    // Initialize array
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }
    // Main function: ceil-divide N by the block size for the grid
    int block_size=256;
    dim3 gridDim((N-1)/block_size+1,1,1);
    dim3 blockDim(block_size,1,1);
    //transfer memory
    float *d_A,*d_B,*d_out;
    int size = sizeof(float) * N;
    cudaMalloc((void **)&d_A,size);
    cudaMalloc((void **)&d_B,size);
    cudaMalloc((void **)&d_out,size);
    cudaMemcpy(d_A,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,b,size,cudaMemcpyHostToDevice);
    vector_add<<<gridDim,blockDim>>>(d_out, d_A, d_B, N);
    // Blocking copy synchronizes with the kernel on the default stream.
    cudaMemcpy(out,d_out,size,cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_out);
    // Verification
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("out[0] = %f\n", out[0]);
    printf("PASSED\n");
    free(a);
    free(b);
    free(out);
    return 0;
}
|
582
|
//
// vectorAdd.cu
// vector_add_parallel
//
// Created by poohRui on 2018/10/23.
// Copyright © 2018 poohRui. All rights reserved.
//
#include <stdio.h>
#include <math.h>
#include <cuda.h>
// Predefine the size of block
#define BLOCK_DIM 256
/**
* This is a kernel function which mainly deal with the computation in vector add
*
* @param A One of the vector to be add on device
* @param B One of the vector to be add on device
* @param C The result of vector add
* @param n The lenght of the vector
*/
__global__
// Kernel body of the vector addition: C[i] = A[i] + B[i] for i < n,
// one thread per element.
void vecAddKernel(float* A, float* B, float* C, int n){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    C[idx] = A[idx] + B[idx];
}
/**
* This function is called stub function which lauching a kernel.
*
* @param h_A One of the vector data will be add.
* @param h_B One of the vector data will be add.
* @param h_C The result of vector add
* @param n The length of the vector
*/
/**
 * Stub function that launches the vector-add kernel and reports the elapsed
 * time (which here includes the device allocations, both copies and the
 * kernel itself, since the events bracket the whole sequence).
 *
 * BUG FIX: cudaEventElapsedTime returns MILLISECONDS; the original printed
 * the value with an "s" (seconds) suffix, overstating nothing numerically
 * but mislabeling the unit by a factor of 1000.
 *
 * @param h_A  One of the vectors to add (host memory).
 * @param h_B  One of the vectors to add (host memory).
 * @param h_C  Receives the element-wise sum (host memory).
 * @param n    The length of the vectors.
 */
void vecAddParallel(float* h_A,
                    float* h_B,
                    float* h_C,
                    int n){
    cudaEvent_t start, stop;
    float elapsedTime = 0.0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    // Allocates object in the device global memory
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);
    // Memory data transfer from host to device
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    // Invoke kernel to do the computation on device (ceil-divide grid)
    vecAddKernel<<<ceil(n/(float)(BLOCK_DIM)), BLOCK_DIM>>>(d_A, d_B, d_C, n);
    // Transfer back the result from d_C to h_C
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // Free device memory for A, B, C
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Parallel invoke vectorAdd function need %.1f ms.\n",elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
583
|
#include "includes.h"
cudaError_t cuda();
// clamp x to range [a, b]
// Empty placeholder kernel; the body has not been implemented yet.
__global__ void kernel(){
}
|
584
|
/*!
* @file CudaHelpers.cu
* @author Zdenek Travnicek
* @date 15.2.2012
* @date 16.2.2013
* @copyright Institute of Intermedia, CTU in Prague, 2012 - 2013
* Distributed under modified BSD Licence, details in file doc/LICENSE
*
*/
#include <cuda.h>
namespace yuri {
namespace cuda {
// Stub: the texture-binding implementation is disabled (see commented-out
// lines), so this currently ignores its argument and always returns a null
// pointer. Callers must not rely on a usable mapping.
void* map_array(cudaArray *array)
{
//texture<uchar4, cudaTextureType2D, cudaReadModeElementType> texRef;
//cudaBindTextureToArray(&texRef,array);
return 0;
}
}
}
|
585
|
/*sums square matrix by reduction*/
__global__ void sum(double sum, double * data, double sz) {
const int colIdx = threadIdx.x;
const int rowIdx = threadIdx.y;
int linearIdx = rowIdx*sz + colIdx;
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
sdata[linearIdx] = data[linearIdx];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2) {
if (linearIdx % (2*s) == 0) {
sdata[linearIdx] += sdata[linearIdx + s];
}
__syncthreads();
}
// write result for this block to global mem
if (linearIdx == 0) {
sum = sdata[0];
}
}
|
586
|
#include "softmax-cross-entropy-grad.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Gradient op for softmax cross-entropy. Output shape mirrors the labels
// tensor y; predecessors are (y, logits) in that order — compile() below
// relies on this ordering.
SoftmaxCrossEntropyGrad::SoftmaxCrossEntropyGrad(Op* y, Op* logits)
: Op("softmax_cross_entropy_grad", y->shape_get(), {y, logits})
{}
// Build the runtime node for this op: allocate a rows*cols output buffer
// and wire a softmax-cross-entropy-grad node over the compiled y and logits
// predecessors, then register the result with the graph.
void SoftmaxCrossEntropyGrad::compile()
{
auto& g = Graph::instance();
// preds()[0] is y (labels), preds()[1] is logits — order fixed by the ctor.
auto& cy = g.compiled(preds()[0]);
auto& clogits = g.compiled(preds()[1]);
// NOTE(review): assumes a rank-2 (rows, cols) output shape — confirm the
// graph guarantees this for cross-entropy inputs.
std::size_t rows = cy.out_shape[0];
std::size_t cols = cy.out_shape[1];
Shape out_shape = cy.out_shape;
dbl_t* out_data = tensor_alloc(rows * cols);
auto out_node = rt::Node::op_softmax_cross_entropy_grad(cy.out_data, clogits.out_data, out_data,
rows, cols,
{cy.out_node, clogits.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
587
|
#include <stdio.h>
#include <stdlib.h>
/**
* In this section, we will discover concurrent operation in CUDA
* 1) blocks in grid: concurrent tasks, no gurantee their order of execution (no synchronization)
* 2) warp in blocks: concurrent threads, explicitly synchronizable (it will be discussed in next section)
* 3) thread in warp: implicitly synchronized
*/
// Print (global thread id, block id, warp index, lane index), but only from
// lanes 0 and 16 of each warp (the half-warp leaders) to keep output short.
__global__ void idx_print()
{
    const int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int warp_idx = threadIdx.x / warpSize;
    const int lane_idx = threadIdx.x % warpSize;
    const bool is_half_warp_leader = (lane_idx & (warpSize/2 - 1)) == 0;
    if (is_half_warp_leader)
        // thread, block, warp, lane"
        printf(" %5d\t%5d\t %2d\t%2d\n", global_idx, blockIdx.x, warp_idx, lane_idx);
}
// Parse grid and block sizes from argv and launch the diagnostic kernel.
// BUG FIX: the original only rejected argc == 1, so running with a single
// argument (e.g. "./cuda_thread_block 4") dereferenced the missing argv[2];
// both sizes are now required.
int main(int argc, char* argv[])
{
    if (argc < 3) {
        puts("Please put Block Size and Thread Block Size..");
        puts("./cuda_thread_block [grid size] [block size]");
        puts("e.g.) ./cuda_thread_block 4 128");
        exit(1);
    }
    int gridSize = atoi(argv[1]);
    int blockSize = atoi(argv[2]);
    puts("thread, block, warp, lane");
    idx_print<<<gridSize, blockSize>>>();
    // Wait for the kernel so its printf output flushes before exit.
    cudaDeviceSynchronize();
}
|
588
|
//16CO212 16CO249
//Computer Architecture Lab Assignment 0
//Question 1
#include <stdio.h>
//Printing Properties of the device
//Printing Properties of the device
// BUG FIX: sharedMemPerBlock, totalGlobalMem, memPitch, totalConstMem and
// textureAlignment are size_t; printing them with %u is undefined behavior
// on LP64 platforms (size_t is 64-bit, unsigned int is 32-bit). They are
// now printed with %zu.
void printDevProp(cudaDeviceProp device_properties)
{
    printf("\tMajor revision number: %d\n", device_properties.major);
    printf("\tMinor revision number: %d\n", device_properties.minor);
    printf("\tName: %s\n", device_properties.name);
    printf("\tTotal shared memory per block: %zu\n", device_properties.sharedMemPerBlock);
    printf("\tTotal global memory: %zu\n", device_properties.totalGlobalMem);
    printf("\tTotal registers per block: %d\n", device_properties.regsPerBlock);
    printf("\tWarp size: %d\n", device_properties.warpSize);
    printf("\tMaximum memory pitch: %zu\n", device_properties.memPitch);
    printf("\tMaximum threads per block: %d\n", device_properties.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("\tMaximum dimension %d of block: %d\n", i, device_properties.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("\tMaximum dimension %d of grid: %d\n", i, device_properties.maxGridSize[i]);
    printf("\tClock rate: %d\n", device_properties.clockRate);
    printf("\tTotal constant memory: %zu\n", device_properties.totalConstMem);
    printf("\tTexture alignment: %zu\n", device_properties.textureAlignment);
    printf("\tConcurrent copy and execution: %s\n", (device_properties.deviceOverlap ? "Yes" : "No"));
    printf("\tNumber of multiprocessors: %d\n", device_properties.multiProcessorCount);
    printf("\tKernel execution timeout: %s\n", (device_properties.kernelExecTimeoutEnabled ? "Yes" : "No"));
    return;
}
int main()
{
// Count the CUDA-capable devices visible to this process.
int device_count = 0;
cudaGetDeviceCount(&device_count);
printf("CUDA Device Query Code\n");
printf("\tNumber of CUDA devices :%d .\n", device_count);
// Query and print the properties of each device in turn.
for (int dev = 0; dev < device_count; ++dev)
{
printf("\nCUDA Device number %d\n", dev);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
printDevProp(props);
}
return 0;
}
|
589
|
#include "includes.h"
// Index-select along dimension `dim`: for each of the idx_size selected
// slices, copy src[..., index[i]-1, ...] into tensor[..., i, ...].
// `index` holds 1-based positions (note the `-1` in the final line).
// Each thread handles one "flat" position, i.e. one combination of all
// coordinates other than `dim`; threads are flattened over a 2D block.
// NOTE(review): assumes src_stride[] is ordered so that dividing by
// src_stride[d] peels off coordinate d -- confirm against the caller.
__global__ void THCudaTensor_kernel_indexSelect( float *tensor, float *src, long* src_stride, float *index, long src_nDim, int dim, long idx_size, long tensor_size, long size_dim )
{
// flatten a 2D thread block into a single linear thread id
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
// number of elements per selected slice
long flat_size = tensor_size / idx_size;
if (thread_idx < flat_size)
{
long coeff = 0;
// one iteration per selected index along `dim`
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx;
int targetIdx = 0;
int srcIdx = 0;
// decompose the flat id into per-dimension coordinates, skipping
// dimension `dim`, and accumulate the corresponding offsets into
// the destination (targetIdx) and source (srcIdx) tensors
for (int d=0; d<src_nDim; d++)
{
if (d < dim)
{
// dimensions before `dim`: source stride includes a factor
// of size_dim that the flat index does not, hence the divide
long stride_d = src_stride[d] / size_dim;
coeff = leftover / stride_d;
leftover -= coeff * stride_d;
// destination replaces size_dim with idx_size along `dim`
targetIdx += coeff * stride_d * idx_size;
srcIdx += coeff * src_stride[d];
}
else if (d > dim)
{
// dimensions after `dim`: strides agree between src and dst
coeff = leftover / src_stride[d];
leftover -= coeff * src_stride[d];
targetIdx += coeff * src_stride[d];
srcIdx += coeff * src_stride[d];
}
}
// index[i] is 1-based; convert to 0-based when addressing src
tensor[targetIdx + i*src_stride[dim]] = src[srcIdx + ((int)(index[i])-1)*src_stride[dim]];
}
}
}
|
590
|
#include "includes.h"
// Sobel edge filter using a shared-memory tile.
// Expected launch: blockDim = (BLOCKDIM_X, BLOCKDIM_Y); blocks advance by
// BLOCKDIM_?-2 so that neighbouring tiles overlap by a 2-pixel halo and
// interior threads can read all 8 neighbours from the tile.
// Fixes over the original:
//  - __syncthreads() was inside a divergent branch (threads on the image
//    boundary skipped it), which is undefined behaviour;
//  - image-boundary pixels were never loaded into the tile, so interior
//    threads adjacent to the image edge read uninitialized shared memory.
__global__ void gpu_sobel_kernel_shared(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y-2) + y;
int j = blockIdx.x*(BLOCKDIM_X-2) + x;
int globalIndex = i*width+j;
// Load every in-image pixel (including image-border pixels).
bool inImage = (i < (int)height) && (j < (int)width);
if (inImage) {
tuile[x][y] = Source[globalIndex];
}
// The barrier must be reached by ALL threads of the block.
__syncthreads();
// Only interior threads (both within the tile halo and strictly inside
// the image) produce an output pixel.
if (inImage && i > 0 && i < (int)height-1 && j > 0 && j < (int)width-1 &&
    x > 0 && y > 0 && x < BLOCKDIM_X-1 && y < BLOCKDIM_Y-1) {
// horizontal gradient magnitude
u_char val = std::abs(tuile[x-1][y-1] + tuile[x-1][y] + tuile[x-1][y+1] -
(tuile[x+1][y-1] + tuile[x+1][y] + tuile[x+1][y+1]));
// plus vertical gradient magnitude
Resultat[globalIndex] = val + std::abs(tuile[x-1][y-1] + tuile[x][y-1] + tuile[x+1][y-1] -
(tuile[x-1][y+1] + tuile[x][y+1] + tuile[x+1][y+1]));
}
}
|
591
|
#include "includes.h"
// Inverse frequency-to-time transform: each thread (my_x) processes one row
// of `height` output samples, summing cosine-weighted input samples.
// NOTE(review): the angle uses (my_x * height + t) * (my_x * height + k),
// i.e. GLOBAL offsets rather than the local t and k -- this differs from a
// textbook per-row inverse DCT/DFT; confirm against the intended transform.
// NOTE(review): rows are indexed with `height` as the stride while `width`
// bounds the inner loop -- only consistent if the matrix is square; verify.
// NOTE(review): `cos` is the double-precision overload; `cosf` would keep
// the computation in single precision -- confirm whether the extra
// precision is intended.
__global__ void inverse_transform(float *in, float *out, int height, int width) {
// block elements
int my_x, k, t;
my_x = blockIdx.x * blockDim.x + threadIdx.x;   // row handled by this thread (no bounds guard)
// iterate through each element, going from frequency to time domain
for (k = 0; k < height; k++) {
// accumulator for the weighted sum of this output sample
float realSum = 0;
// iterate through the input element
for (t = 0; t < width; t++) {
float angle = 2 * M_PI * (my_x * height + t) * (my_x * height + k) / height;
realSum += in[my_x * height + t] * cos(angle);
}
// normalize by the transform length
out[my_x * height + k] = (realSum / height);
}
}
|
592
|
/*This mixed-precision matrix-vector multiplication algorithm is based on cublasSgemv NVIDIA's CUBLAS 1.1.
*/
// Launch geometry: THREAD_COUNT threads per block, CTAS blocks.
#define LOG_THREAD_COUNT (7)
#define THREAD_COUNT (1 << LOG_THREAD_COUNT)
#define CTAS (64)
// Column-major addressing of A with leading dimension lda.
#define IDXA(row,col) (lda*(col)+(row))
// Strided vector addressing (BLAS incx/incy semantics).
#define IDXX(i) (startx + ((i) * incx))
#define IDXY(i) (starty + ((i) * incy))
#define TILEW_LOG (5)
#define TILEW (1 << TILEW_LOG)
#define TILEH_LOG (5)
#define TILEH (1 << TILEH_LOG)
#define X_ELEMS_PER_THREAD (4)
// Row / column / x-load strides per outer iteration.
#define IINC (CTAS * THREAD_COUNT)
#define JINC (THREAD_COUNT * X_ELEMS_PER_THREAD)
#define XINC (THREAD_COUNT)
// NOTE(review): __shared__ variables are not valid at file scope in CUDA;
// XX is also shadowed by the XX declared inside the kernel below and AA is
// never referenced -- confirm and remove these two declarations.
__shared__ float XX[TILEH];
__shared__ float AA[(TILEH+1)*TILEW];
// Mixed-precision SGEMV (no transpose): y = A * x, with A m-by-n,
// column-major with leading dimension lda, and BLAS-style strides
// incx/incy for the vectors. Products are single precision; the dot
// product is accumulated in a double (`sdot`) -- that is the "mixed
// precision". Each thread owns one row per outer iteration; a tile of x
// is staged in shared memory (XX) by the whole block.
// NOTE(review): assumes a launch of CTAS blocks x THREAD_COUNT threads so
// that the JINC-element tile loads below exactly cover XX -- confirm.
__global__ void sgemvn_mixedprecis(const float *A, const float *x,float *y, int m, int n, int lda, int incx, int incy)
{
// shared tile of x, JINC = THREAD_COUNT * X_ELEMS_PER_THREAD elements
__shared__ float XX[JINC];
int i, ii, j, jj, idx, incr, tid;
double sdot;
int startx;
int starty;
tid = threadIdx.x;
// BLAS convention: negative strides index the vector from its far end
startx = (incx >= 0) ? 0 : ((1 - n) * incx);
starty = (incy >= 0) ? 0 : ((1 - m) * incy);
// outer loop over row chunks of IINC rows; this block handles rows
// [i + blockIdx.x*THREAD_COUNT, ...) with one row per thread
for (i = 0; i < m; i += IINC) {
ii = i + blockIdx.x * THREAD_COUNT;
if (ii >= m) break;
ii += tid;
sdot = 0.0f;
// loop over column tiles of JINC columns
for (j = 0; j < n; j += JINC) {
int jjLimit = min (j + JINC, n);
incr = XINC * incx;
jj = j + tid;
// barrier before overwriting XX: previous tile must be consumed
__syncthreads ();
idx = IDXX(jj);
// each thread loads up to X_ELEMS_PER_THREAD strided elements of x;
// the branches handle a partial final tile
if (jj < (jjLimit - 3 * XINC)) {
XX[tid+0*XINC] = x[idx + 0 * incr];
XX[tid+1*XINC] = x[idx + 1 * incr];
XX[tid+2*XINC] = x[idx + 2 * incr];
XX[tid+3*XINC] = x[idx + 3 * incr];
}
else if (jj < (jjLimit - 2 * XINC)) {
XX[tid+0*XINC] = x[idx + 0 * incr];
XX[tid+1*XINC] = x[idx + 1 * incr];
XX[tid+2*XINC] = x[idx + 2 * incr];
}
else if (jj < (jjLimit - 1 * XINC)) {
XX[tid+0*XINC] = x[idx + 0 * incr];
XX[tid+1*XINC] = x[idx + 1 * incr];
}
else if (jj < (jjLimit - 0 * XINC)) {
XX[tid+0*XINC] = x[idx + 0 * incr];
}
// barrier after the writes: XX must be complete before reading
__syncthreads ();
if (ii < m) { /* if this row is active, accumulate dp */
idx = IDXA(ii, j);
incr = lda;
jjLimit = jjLimit - j;
jj = 0;
// 6-way unrolled accumulation over the tile
while (jj < (jjLimit - 5)) {
sdot += A[idx + 0*incr] * XX[jj+ 0];
sdot += A[idx + 1*incr] * XX[jj+ 1];
sdot += A[idx + 2*incr] * XX[jj+ 2];
sdot += A[idx + 3*incr] * XX[jj+ 3];
sdot += A[idx + 4*incr] * XX[jj+ 4];
sdot += A[idx + 5*incr] * XX[jj+ 5];
jj += 6;
idx += 6 * incr;
}
// remainder loop for the last <6 elements of the tile
while (jj < jjLimit) {
sdot += A[idx + 0*incr] * XX[jj+ 0];
jj += 1;
idx += 1 * incr;
}
}
}
// write the finished dot product for this row (narrowing to float)
if (ii < m) {
idx = IDXY(ii);
y[idx] = sdot;
}
}
}
|
593
|
// Matrix-matrix product C = A * B for N x N matrices, one thread block per
// output element: block (x, y) produces the element addressed C[y*N + x].
// Element (r, k) of A is read at A[k*N + r] and (k, c) of B at B[c*N + k].
__global__ void sgemm (float *A, float *B, float *C, int N)
{
// Output coordinates come from the block indices alone.
const int row = blockIdx.x;
const int col = blockIdx.y;
// Accumulate the K-dimension dot product for this element.
float sum = 0.0f;
for (int k = 0; k < N; ++k)
sum += A[k * N + row] * B[col * N + k];
C[col * N + row] = sum;
}
|
594
|
#include "includes.h"
// Convert a packed 32-bit RGB(A) image to a single-channel float grey image
// in [0, 1]. From the masks below: byte 0 = R, byte 1 = G, byte 2 = B.
// One thread per pixel over a 2D launch, guarded for the grid tail.
// All literals carry the `f` suffix: the original used double constants
// (255.0, 0.29894, ...), silently promoting every pixel to double math.
__global__ void rgbUtoGreyF_kernel(int width, int height, unsigned int* rgbU, float* grey) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if ((x < width) && (y < height)) {
int index = y * width + x;
unsigned int rgb = rgbU[index];
float r = (float)(rgb & 0xff)/255.0f;
float g = (float)((rgb & 0xff00) >> 8)/255.0f;
float b = (float)((rgb & 0xff0000) >> 16)/255.0f;
// luma-style channel weights (approximately 0.299/0.587/0.114)
grey[index] =
(0.29894f * r)
+ (0.58704f * g)
+ (0.11402f * b);
}
}
|
595
|
// includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part3: implement the kernel
// Per-block max reduction: each block writes the maximum of its slice of
// temp[0..ndimp) into cmax[blockIdx.x].
// Launch with dynamic shared memory of blockDim.x * sizeof(double); the
// interleaved reduction below expects blockDim.x to be a power of two.
// Threads past ndimp contribute 0.0 (assumes non-negative inputs -- the
// host fills temp with rand()%1e8). `maxac` is unused; kept for signature
// compatibility with existing callers.
__global__ void max_parallel(double *cmax, double *temp,int ndimp, double maxac)
{
extern __shared__ double scratch[];
unsigned int lane = threadIdx.x;
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
// stage this thread's element (or the 0.0 identity) in shared memory
scratch[lane] = (gidx < ndimp) ? temp[gidx] : 0.0;
__syncthreads();
// interleaved tree reduction: the stride doubles each round
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
if ((lane % (2 * stride)) == 0 && scratch[lane + stride] > scratch[lane])
scratch[lane] = scratch[lane + stride];
__syncthreads();
}
// lane 0 publishes the block maximum
if (lane == 0)
cmax[blockIdx.x] = scratch[0];
}
/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
// Fill an array with random doubles, compute its maximum on the GPU with a
// block-wise reduction plus a host-side final pass, and compare against a
// CPU reference maximum.
int main( int argc, char** argv)
{
// host data: h_c holds the random input, h_temp receives per-block maxima
int *h_a;
double *h_c, *h_temp;
double maxc=-1.0;   // GPU-computed maximum
double tmax=-1.0;   // CPU reference maximum
int dimA = 2097152; // 2M elements
// pointer for device memory
int *d_b, *d_a;
double *d_c, *d_temp;
int numThreadsPerBlock = 128;
// dimA is an exact multiple of the block size, so no remainder block
int numBlocks = dimA / numThreadsPerBlock;
srand (time(NULL));
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
cudaMalloc( (void **) &d_a, memSize );
cudaMalloc( (void **) &d_b, memSize );
// dynamic shared memory: one double per thread of a block
int smemSize = numThreadsPerBlock * sizeof(double);
size_t dmemSize = numBlocks * numThreadsPerBlock * sizeof(double);
h_c = (double *) malloc(dmemSize);
h_temp = (double *) malloc(dmemSize);
cudaMalloc( (void **) &d_c, dmemSize );
cudaMalloc( (void **) &d_temp, dmemSize );
int imax = 0;
int ccount=0;
int j=0;
// initialize the input and track the CPU-side maximum and its position
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
h_c[i]=(rand()%100000000);
if(h_c[i]>tmax)
{
tmax=h_c[i];
imax=i;
}
}
printf("\n\n\n %d %f %d\n", dimA, tmax, imax);
// copy input arrays to the device
cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_c, h_c, dmemSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_temp, h_c, dmemSize, cudaMemcpyHostToDevice );
// (the original copied the never-written d_b back into h_a here,
// overwriting it with uninitialized device memory; removed)
checkCUDAError("memcpy");
// clear the per-block result area before the reduction
for(int i=0;i<numBlocks;i++)
h_temp[i]=0;
cudaMemcpy(d_temp, h_temp, numBlocks*sizeof(double), cudaMemcpyHostToDevice);
max_parallel<<<numBlocks,numThreadsPerBlock,smemSize>>>(d_temp,d_c,dimA,tmax);
// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
cudaDeviceSynchronize();
cudaMemcpy(h_temp, d_temp, numBlocks*sizeof(double), cudaMemcpyDeviceToHost);
// reduce the per-block maxima on the host
for(int i=0;i<numBlocks;i++)
{
if(h_temp[i]>maxc) maxc=h_temp[i];
}
if(maxc==tmax) ccount++;
printf("\n\n\nnumblocks %d %d max=%f %f %d\n",j, numBlocks, maxc, tmax, ccount);
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// free ALL device memory (the original leaked d_c and d_temp)
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_temp);
// free ALL host memory (the original leaked h_c and h_temp)
free(h_a);
free(h_c);
free(h_temp);
printf("Correct!\n");
return 0;
}
// Abort with a descriptive message if the CUDA runtime has recorded an
// error; `msg` names the call site in the output.
void checkCUDAError(const char *msg)
{
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( status) );
exit(EXIT_FAILURE);
}
}
|
596
|
#include <iostream>
#include <iterator>
#include <queue>
#include <vector>
#include <math.h>
#include <assert.h>
#define CONV_MATRIX_SIZE 10
#define MAX_POOL_SIZE 10
#define TRIBE_MIN_POPULATION 15
#define RESET "\033[0m"
#define BOLDBLACK "\033[1m\033[40m" /* Bold Black */
#define BOLDRED "\033[1m\033[41m" /* Bold Red */
#define BOLDGREEN "\033[1m\033[42m" /* Bold Green */
#define BOLDYELLOW "\033[1m\033[43m" /* Bold Yellow */
#define BOLDBLUE "\033[1m\033[44m" /* Bold Blue */
#define BOLDMAGENTA "\033[1m\033[45m" /* Bold Magenta */
#define BOLDCYAN "\033[1m\033[46m" /* Bold Cyan */
#define BOLDWHITE "\033[1m\033[47m" /* Bold White */
using namespace std;
typedef pair<int, int> Member; // pair<row, column>
typedef pair<int, int> Point; // pair<row, column>
typedef vector<Member> Tribe;
// Row-major matrix wrapper: element (i, j) lives at matrix[i * width + j].
template <typename T>
struct Matrix {
vector<T> matrix;
int height, width;
};
// Groups live cells of a grid into "tribes" by approximating population
// centers with a convolution + max-pooling pass and then flood-filling
// (BFS) members within `range` of each other.
// Fixes over the original:
//  - max-pool results were indexed with `r * height + c` instead of
//    `r * width + c` (only coincidentally correct for square results);
//  - adding a group to an existing tribe passed an iterator of the WRONG
//    container (newTribe.end()) to vector::insert, which is undefined
//    behaviour.
class Map {
private:
vector<int> grid;
const int M, N; // M = grid height; N = grid width
const int range;
vector<Tribe> tribes;
vector<int> map; // the map records the citizenship of life on every coordinate
// Print an int matrix, left-padding single-digit values with a space.
void printIntMatrix(Matrix<int> mat)
{
for (int i = 0; i < mat.height; i++)
{
for (int j = 0; j < mat.width; j++)
{
int pop = mat.matrix[i * mat.width + j];
if (pop < 10)
cout << pop << " ";
else
cout << pop;
}
cout << endl;
}
}
// Print a matrix of leader coordinates as the population value each
// leader points at in `value` ((-1,-1) prints as 0).
void printMemberMatrix(Matrix<Member> mat, Matrix<int> value)
{
for (int i = 0; i < mat.height; i++)
{
for (int j = 0; j < mat.width; j++)
{
Member leader = mat.matrix[i * mat.width + j];
int pop;
if (leader.first == -1) pop = 0;
else pop = value.matrix[leader.first * value.width + leader.second];
if (pop < 10)
cout << pop << " ";
else
cout << pop;
}
cout << endl;
}
}
// "Valid" convolution of the grid with an all-ones CONV_MATRIX_SIZE
// square: each output cell holds the population of that window.
Matrix<int> convolution()
{
int conv_result_height = M - CONV_MATRIX_SIZE + 1;
int conv_result_width = N - CONV_MATRIX_SIZE + 1;
vector<int> conv_result(conv_result_height * conv_result_width, 0);
for (int grid_r = 0; grid_r < conv_result_height; grid_r++) {
for (int grid_c = 0; grid_c < conv_result_width; grid_c++) {
int total = 0;
for (int mat_r = 0; mat_r < CONV_MATRIX_SIZE; mat_r ++) {
for (int mat_c = 0; mat_c < CONV_MATRIX_SIZE; mat_c++) {
total += grid[(grid_r + mat_r) * N + (grid_c + mat_c)];
}
}
conv_result[grid_r * conv_result_width + grid_c] = total;
}
}
Matrix<int> result;
result.matrix = conv_result;
result.height = conv_result_height;
result.width = conv_result_width;
return result;
}
// Sliding-window argmax: for each window, record the COORDINATES of the
// maximal cell, or (-1,-1) when the max is below TRIBE_MIN_POPULATION.
Matrix<Member> max_pooling(vector<int> input_matrix, int input_h, int input_w)
{
int pool_result_height = input_h - MAX_POOL_SIZE + 1;
int pool_result_width = input_w - MAX_POOL_SIZE + 1;
vector<Member> pool_result(pool_result_height * pool_result_width, pair<int, int>(-1,-1));
for (int grid_r = 0; grid_r < pool_result_height; grid_r++) {
for (int grid_c = 0; grid_c < pool_result_width; grid_c++) {
int max = 0;
Member max_coord;
for (int mat_r = 0; mat_r < MAX_POOL_SIZE; mat_r ++) {
for (int mat_c = 0; mat_c < MAX_POOL_SIZE; mat_c++) {
int cur = input_matrix[(grid_r + mat_r) * input_w + (grid_c + mat_c)];
if (cur >= max) {
max = cur;
max_coord = pair<int, int>(grid_r + mat_r, grid_c + mat_c);
}
}
}
if (max >= TRIBE_MIN_POPULATION)
pool_result[grid_r * pool_result_width + grid_c] = max_coord;
}
}
Matrix<Member> result;
result.matrix = pool_result;
result.height = pool_result_height;
result.width = pool_result_width;
return result;
}
/* find the shortest distance to the tribe of given index
   (note: returns the SQUARED Euclidean distance -- no sqrt is taken) */
float distance_to_tribe(Member p, int tribe_index)
{
Tribe t = tribes[tribe_index];
float shortest_dist = INFINITY;
for (auto member : t)
{
float dist = (member.first - p.first) * (member.first - p.first) +
(member.second - p.second) * (member.second - p.second);
if (dist < shortest_dist) shortest_dist = dist;
}
return shortest_dist;
}
// Tribe index of the cell at p, or -1 if p is out of bounds, dead, or
// only provisionally tagged (map value == tribes.size() marks a group
// still being collected, not yet a registered tribe).
int getTribeMembership(Member p)
{
// p coordinates out of bound
bool withinBound = (0 <= p.first && p.first < M) && (0 <= p.second && p.second < N);
if (!withinBound) return -1;
// p is not alive
if (grid[p.first*N+p.second] == 0) return -1;
int tribe_index = map[p.first * N + p.second] == tribes.size() ? -1 : map[p.first * N + p.second];
return tribe_index;
}
// Linear membership scan (a Tribe is an unordered vector of coordinates).
bool isInTribe(Member p, Tribe t)
{
for (auto m: t) {
if (m.first == p.first && m.second == p.second) return true;
}
return false;
}
// Scan a radius of range/2 around every wanderer for cells that already
// belong to a tribe; return the index of the closest such tribe, or -1.
int searchNearbyTribe(vector<Member> group_of_wanderers)
{
int radius = range/2;
float shortest_distance = INFINITY;
int closest_tribe = -1;
for (auto p: group_of_wanderers) {
for (int r = p.first - radius; r <= p.first + radius; r++) {
for (int c = p.second - radius; c <= p.second + radius; c++) {
Member neighbor = pair<int,int>(r,c);
float delta_r = abs(neighbor.first - p.first);
float delta_c = abs(neighbor.second - p.second);
if (delta_c * delta_c + delta_r * delta_r > radius * radius) continue; // distance > radius
int t = getTribeMembership(neighbor);
if (t != -1) {
float d = distance_to_tribe(p, t);
if (d < shortest_distance) {
shortest_distance = d;
closest_tribe = t;
}
}
}
}
}
return closest_tribe;
}
// Promote a collected group to a real tribe if it is large enough;
// otherwise release its provisional map entries.
void register_new_tribe(Tribe newTribe)
{
if (newTribe.size() >= TRIBE_MIN_POPULATION) {
// qualified to establish a new tribe
tribes.push_back(newTribe);
}
else {
// registration failed
for (auto m : newTribe)
map[m.first * N + m.second] = -1;
}
}
/*
* Approximate tribe centers using convolution and max-pooling
* For each center, add all members within range to the tribe in tribes vector
*/
void init_tribes()
{
auto conv = this->convolution();
auto max_pool = this->max_pooling(conv.matrix, conv.height, conv.width);
for (int r = 0; r < max_pool.height; r++) {
for (int c = 0; c < max_pool.width; c++) {
// get tribe leader from max pooling result
// (row-major index uses the matrix WIDTH as the stride)
pair<int, int> tribe_leader= max_pool.matrix[r * max_pool.width + c];
// if there's no tribe leader in this area
if (tribe_leader.first == -1) continue;
// collect all life in matrix starting with the coordinates of this tribe leader
Tribe newTribe;
for (int tr = tribe_leader.first; tr < tribe_leader.first + CONV_MATRIX_SIZE; tr++) {
for (int tc = tribe_leader.second; tc < tribe_leader.second + CONV_MATRIX_SIZE; tc++) {
if (grid[tr * N + tc] > 0) {
Member p = pair<int, int>(tr, tc);
// if already a member of some tribe
if (getTribeMembership(p) != -1) continue;
// if already collected
if (isInTribe(p, newTribe)) continue;
// find all neighbors within range using BFS
vector<Member> valid_neighbors = BFS(p, newTribe);
// the group looks for a nearby tribe to settle down
int nearbyT = searchNearbyTribe(valid_neighbors);
if (nearbyT == -1) // if there's no tribe nearby
{
newTribe.insert(newTribe.end(), valid_neighbors.begin(), valid_neighbors.end());
for (auto n : valid_neighbors)
map[n.first * N + n.second] = tribes.size();
}
else // found nearby existing tribe, add this group of lives to the tribe found
{
// insert at the END of the DESTINATION tribe
tribes[nearbyT].insert(tribes[nearbyT].end(), valid_neighbors.begin(), valid_neighbors.end());
for (auto n : valid_neighbors)
map[n.first * N + n.second] = nearbyT;
}
}
}
}
register_new_tribe(newTribe);
}
}
}
/* Find all members of each tribe using BFS */
vector<Member> BFS(Member seed, Tribe newTribe) {
// initialize queue
vector<Member> queue;
queue.push_back(seed);
vector<Member> visited = newTribe;
vector<Member> new_neighbors = {seed};
//find neighbors
int radius = range/2;
while (!queue.empty())
{
Member p = queue.front();
if (!isInTribe(p, visited)) {
visited.push_back(p);
if (!isInTribe(p, new_neighbors)) new_neighbors.push_back(p);
for (int r = p.first - radius; r <= p.first + radius; r++) {
for (int c = p.second - radius; c <= p.second + radius; c++) {
if (r == p.first && c == p.second) continue; // neighbor is p itself
bool withinBound = (0 <= r && r < M) && (0 <= c && c < N);
if (!withinBound) continue; // neighbor is out of bound
if (grid[r*N+c] == 0) continue; // neighbor is not alive
Member neighbor = pair<int,int>(r,c);
if (getTribeMembership(neighbor) != -1) continue; // neighbor is in other tribe
else if (isInTribe(neighbor, visited)) continue; // neighbor is already visited
else if (isInTribe(neighbor, queue)) continue; // neighbor is already in queue
float delta_r = abs(neighbor.first - p.first);
float delta_c = abs(neighbor.second - p.second);
if (delta_c * delta_c + delta_r * delta_r > radius * radius) continue; // neighbor is too far: distance > radius
queue.push_back(neighbor);
}
}
}
queue.erase(queue.begin());
}
return new_neighbors;
}
public:
vector<string> colors = {BOLDYELLOW, BOLDGREEN, BOLDBLUE, BOLDRED, BOLDMAGENTA, BOLDCYAN, BOLDBLACK, BOLDWHITE};
// grid is an M x N row-major occupancy matrix (0 = dead, >0 = alive);
// range bounds how far apart tribe members may be.
Map(int M, int N, int range, vector<int> grid) :
M(M), N(N), range(range), grid(grid)
{
// -1 marks "no tribe" on every coordinate
vector<int> empty(grid.size(), -1);
map = empty;
}
// Recompute the tribes from scratch, print a population summary per
// tribe, and return them.
vector<Tribe> get_tribes()
{
tribes.clear();
this->init_tribes();
int count = 0;
for (auto t : tribes) {
cout << "Tribe " << count << ": " << t.size() << endl;
count++;
}
return tribes;
}
};
|
597
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <cuda_runtime.h>
#define PI 3.14159265358979323846
#define N 100000
#define BLOCK_SIZE 1024
// Box-Muller transform: map two independent uniforms u1, u2 in (0, 1]
// to two independent standard-normal draws written to *n1 and *n2.
// u1 must be nonzero (logf(0) is -inf).
__device__ void BoxMuller(float u1, float u2, float *n1, float *n2)
{
float radius = sqrtf(-2*logf(u1));
float angle = 2*PI*(u2);
*n2 = radius*cosf(angle);
*n1 = radius*sinf(angle);
}
// Elementwise Box-Muller: for each index below `size`, consume one uniform
// from each input array and write one normal draw to each output array.
// One thread per element; guarded for the grid tail.
__global__ void norm_transform(float *dev_u1, float *dev_u2,
float *dev_n1, float *dev_n2, int size)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx >= size)
return;
float z0, z1;
BoxMuller(dev_u1[idx], dev_u2[idx], &z0, &z1);
dev_n1[idx] = z0;
dev_n2[idx] = z1;
}
// Generate two streams of uniforms with cuRAND, transform them to normal
// draws with the Box-Muller kernel, and print timing plus a sample.
int main()
{
int dev_count;
cudaGetDeviceCount(&dev_count);
printf("Number of CUDA-capable devices: %d.\n", dev_count);
cudaDeviceProp dev_prop;
for (int j=0; j<dev_count; j++)
{
cudaGetDeviceProperties(&dev_prop, j);
printf("Device number %d has max %d threads per block.\n", j, dev_prop.maxThreadsPerBlock);
printf("Device number %d has %d multiprocessors.\n", j, dev_prop.multiProcessorCount);
}
int i;
curandGenerator_t gen1, gen2;
float *dev_u1, *dev_u2;
float *dev_n1, *dev_n2, *host_n1, *host_n2;
// time the whole allocate + generate + transform + copy-back pipeline
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
host_n1 = (float*)calloc(N, sizeof(float));
host_n2 = (float*)calloc(N, sizeof(float));
// allocate memory on the device
cudaMalloc((void**)&dev_u1, N * sizeof(float));
cudaMalloc((void**)&dev_u2, N * sizeof(float));
cudaMalloc((void**)&dev_n1, N * sizeof(float));
cudaMalloc((void**)&dev_n2, N * sizeof(float));
// create one Mersenne-twister generator per uniform stream
curandCreateGenerator(&gen1, CURAND_RNG_PSEUDO_MTGP32);
curandCreateGenerator(&gen2, CURAND_RNG_PSEUDO_MTGP32);
// seed the two generators DIFFERENTLY: the original never set a seed, so
// both generators produced the identical sequence (u1 == u2), breaking
// the independence the Box-Muller transform requires; explicit seeds
// also make the run reproducible
curandSetPseudoRandomGeneratorSeed(gen1, 1234ULL);
curandSetPseudoRandomGeneratorSeed(gen2, 4321ULL);
curandGenerateUniform(gen1, dev_u1, N);
curandGenerateUniform(gen2, dev_u2, N);
int numBlocks = ceil(float(N) / BLOCK_SIZE);
// box muller transform
norm_transform<<<numBlocks, BLOCK_SIZE>>>(dev_u1, dev_u2, dev_n1, dev_n2, N);
// copy device memory to host (cudaMemcpy synchronizes with the kernel)
cudaMemcpy(host_n1, dev_n1, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(host_n2, dev_n2, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time elapsed to generate 2 x %d normal variables: %f seconds.\n", N, elapsedTime/1000.0);
printf("Random normal draws: \n");
for (i = 0; i < 10; i++)
{
printf(" %1.4f %1.4f\n", host_n1[i], host_n2[i]);
}
printf("\n");
curandDestroyGenerator(gen1);
curandDestroyGenerator(gen2);
// free the device buffers explicitly (the original leaked them until reset)
cudaFree(dev_u1);
cudaFree(dev_u2);
cudaFree(dev_n1);
cudaFree(dev_n2);
cudaDeviceReset();
free(host_n1);
free(host_n2);
return 0;
}
|
598
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <cuda.h>
// 3x3 Gaussian blur on a single-channel (grey) image.
// One thread per pixel over a 2D launch; x indexes rows, y indexes columns.
// Fixes over the original: (1) a grid-tail bounds guard -- the grid is
// ceil-divided, so threads past the image wrote out of bounds; (2) x and y
// are int, not size_t, so x-1 at x==0 yields -1 instead of wrapping.
__global__ void kernel_grey(uint8_t* start, uint8_t* end, int height, int width)
{
int i, j, k, l;
float blur[3][3] = {{1,2,1}, {2,4,2}, {1,2,1}}; // Gaussian blur filter
float filter[3][3];
// flip the filter along rows and columns (convolution vs. correlation;
// the kernel is symmetric, so this is a no-op in practice)
for(int i=0;i<3;i++)
for(int j=0;j<3;j++)
filter[i][j]=blur[2-i][2-j];
// pixel owned by this thread
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// guard the grid tail: threads beyond the image do nothing
if (x >= height || y >= width)
return;
float new_val;
new_val = 0.0; // blurred value for this pixel
for (i = x-1, k = 0 ; i <= x+1 ; i++, k++) {
for (j = y-1, l = 0 ; j <= y+1 ; j++, l++) {
if ( i == -1 || i == height || j == -1 || j == width ) {
// image border: fall back to the centre pixel value
// NOTE(review): plain assignment (not +=) as in the original;
// it discards the sum accumulated so far at borders -- confirm
new_val = start[width * x + y];
}
else {
// in bounds: accumulate the weighted neighbour
new_val += start[width * i + j] * filter[k][l] / 16.0;
}
}
}
end[width * x + y] = new_val;
}
// 3x3 Gaussian blur on an interleaved RGB image (3 bytes per pixel).
// One thread per pixel; x indexes rows, y indexes pixel columns, and byte
// offsets within a row use y*3.
// Fixes over the original: (1) a grid-tail bounds guard -- the grid spans
// width*3 columns of threads, so every thread with y >= width wrote far
// out of bounds; (2) x and y are int, not size_t, so x-1 and (y*3)-3 can
// go negative without wrapping.
__global__ void kernel_rgb(uint8_t* start, uint8_t* end, int height, int width)
{
int i, j, k, l;
float blur[3][3] = {{1,2,1}, {2,4,2}, {1,2,1}}; // Gaussian blur filter
float filter[3][3];
// flip the filter along rows and columns (symmetric kernel: no-op)
for(int i=0;i<3;i++)
for(int j=0;j<3;j++)
filter[i][j]=blur[2-i][2-j];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// guard the grid tail: threads beyond the image do nothing
if (x >= height || y >= width)
return;
float new_val_red, new_val_blue, new_val_green;
new_val_red = 0.0; // accumulated red, green, and blue for this pixel
new_val_blue = 0.0;
new_val_green = 0.0;
for (i = x-1, k = 0 ; i <= x+1 ; i++, k++) {
for (j = (y*3)-3, l = 0 ; j <= (y*3)+3 ; j += 3, l++) {
if ( i == -1 || i == height || j == -3 || j == (3*width) ) {
// image border: weight the centre pixel's channels instead
// NOTE(review): indexes with `+ y`, not `+ y*3` as the writes
// below do -- looks off by a factor of 3; preserved as-is,
// confirm against a CPU reference
new_val_red += start[(width*3) * x + y] * filter[k][l] / 16.0;
new_val_green += start[(width*3) * x + y + 1] * filter[k][l] / 16.0;
new_val_blue += start[(width*3) * x + y + 2] * filter[k][l] / 16.0;
}
else {
// in bounds: accumulate the weighted neighbour per channel
new_val_red += start[(width*3) * i + j] * filter[k][l] / 16.0;
new_val_green += start[(width*3) * i + j + 1] * filter[k][l] / 16.0;
new_val_blue += start[(width*3) * i + j + 2] * filter[k][l] / 16.0;
}
}
}
end[(width*3) * x + (y*3)] = new_val_red;
end[(width*3) * x + (y*3) + 1] = new_val_green;
end[(width*3) * x + (y*3) + 2] = new_val_blue;
}
// Host driver: copy the image to the GPU, apply the blur kernel `loops`
// times while ping-ponging between two device buffers, and copy the result
// back into `table` in place. `type` selects "GREY" (1 byte/pixel) versus
// RGB (3 bytes/pixel); `blocksize` is the square thread-block edge.
extern "C" void filtering( uint8_t* table, int height, int width, int loops, char* type, int blocksize)
{
uint8_t* start, * end, * temp;
size_t bytes;
// buffer size depends on the pixel format
if(strcmp(type, "GREY") == 0) bytes = height * width;
else bytes = height * width * 3;
// allocate the two device buffers
cudaMalloc( &start, bytes*sizeof(uint8_t) );
cudaMalloc( &end, bytes*sizeof(uint8_t) );
// upload the input image and clear the destination buffer
cudaMemcpy( start, table, bytes, cudaMemcpyHostToDevice );
cudaMemset( end, 0, bytes );
int i;
for(i=0; i<loops; i++) // one kernel launch per requested pass
{
if(strcmp(type, "GREY") == 0)
{
int gridX, gridY;
gridX = (height + blocksize - 1) / blocksize;
gridY = (width + blocksize - 1) / blocksize;
dim3 dimBlock(blocksize, blocksize);
dim3 dimGrid(gridX, gridY);
kernel_grey<<<dimGrid, dimBlock>>>(start, end, height, width);
}
else
{
int gridX, gridY;
gridX = (height + blocksize - 1) / blocksize;
gridY = ((width * 3) + blocksize - 1) / blocksize;
dim3 dimBlock(blocksize, blocksize);
dim3 dimGrid(gridX, gridY);
kernel_rgb<<<dimGrid, dimBlock>>>(start, end, height, width);
}
// swap: after this, `start` always holds the latest result
temp = start;
start = end;
end = temp;
}
cudaGetLastError();
// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
cudaDeviceSynchronize();
// Because of the swap inside the loop, the newest image is ALWAYS in
// `start` (and with loops == 0 it is the untouched input). The original
// copied `end` for odd loop counts, returning a stale buffer.
cudaMemcpy( table, start, bytes, cudaMemcpyDeviceToHost );
// release the device buffers
cudaFree(start);
cudaFree(end);
}
|
599
|
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
using namespace std;
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double get_time()
{
struct timeval now;
gettimeofday(&now, NULL);
return (double) now.tv_sec + now.tv_usec / 1000000.0;
}
//KERNEL
// Elementwise vector addition: c[i] = a[i] + b[i] for i < N.
// BSZ must equal the launch's blockDim.x (the kernel uses the parameter
// instead of blockDim.x to form the global index).
__global__ void Add(float *a, float *b, float *c, int N, int BSZ)
{
int idx = blockIdx.x*BSZ + threadIdx.x;
if (idx < N) {
c[idx] = a[idx] + b[idx];
}
}
// Vector addition demo: fill two N-element vectors on the host, add them
// on the GPU, time the kernel, and print the last element of the result.
int main()
{
//ALLOCATE AND INITIALIZE DATA ON CPU
printf("----------------------------------------\n");
printf(" ALLOCATE AND INITIALIZE DATA ON CPU\n");
printf("----------------------------------------\n");
int N =8388608; //n° of threads
cout<<"N° of threads="<<N<<endl;
int blocksize = 256;
cout<<"N° of threads per block="<<blocksize<<endl;
// ceil-style block count: (N-0.5)/blocksize + 1 always covers N threads
int num_block = (N-0.5)/blocksize + 1;
cout<<"N° of blocks="<<num_block<<endl;
float *a = new float[N];
float *b = new float[N];
float *c = new float[N];
for( int j=0;j<N;++j)
{
a[j]=j;
b[j]=j;
c[j]=0;
}
cout<<"A[0]="<<a[0]<<endl;
printf(".\n.\n.\n");
cout<<"A[N-1]="<<a[N-1]<<endl;
printf(" + \n");
cout<<"B[0]="<<b[0]<<endl;
printf(".\n.\n.\n");
cout<<"B[N-1]="<<b[N-1]<<endl;
//ALLOCATE DATA ON GPU
printf("----------------------------------------\n");
printf(" ALLOCATE DATA ON GPU\n");
printf("----------------------------------------\n");
float *d_a, *d_b, *d_c;
cudaMalloc((void**) &d_a, N*sizeof(float));
cudaMalloc((void**) &d_b, N*sizeof(float));
cudaMalloc((void**) &d_c, N*sizeof(float));
//TRANSFER DATA FROM CPU TO GPU
printf(" TRANSFER DATA FROM CPU TO GPU\n");
printf("----------------------------------------\n");
cudaMemcpy(d_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, N*sizeof(float), cudaMemcpyHostToDevice);
//RUN KERNEL
printf(" RUN KERNEL");
double start = get_time(); //Initial time
Add<<<num_block,blocksize>>>(d_a, d_b, d_c, N, blocksize);
// kernel launches are asynchronous: without this sync the original
// timed only the launch overhead, not the kernel itself
cudaDeviceSynchronize();
double finish = get_time(); //Final time
double diff = finish - start;
cout<<" time ="<<diff<<" [s]"<<endl;
printf("----------------------------------------\n");
//TRANSFER DATA FROM GPU TO CPU
printf(" TRANSFER DATA FROM GPU TO CPU\n");
printf("----------------------------------------\n");
cudaMemcpy(c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost);
cout<<"\na[N-1] + b[N-1] = "<<a[N-1]<<" + "<<b[N-1]<<endl;
cout<<"c[N-1] = "<<c[N-1]<<endl;
//FREE MEMORY
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// the original leaked the three host arrays
delete[] a;
delete[] b;
delete[] c;
return 0;
}
|
600
|
#include <stdio.h>
#include <iostream>
#define N 512
#define BLOCK_DIM 32
// Elementwise addition of two N x N int matrices stored contiguously.
// 2D launch; thread (gx, gy) handles the element at offset gx * N + gy.
__global__ void matrixAdd(int *d_a, int *d_b, int *d_out){
// absolute 2D coordinates of this thread in the launch grid
int gx = blockDim.x * blockIdx.x + threadIdx.x;
int gy = blockDim.y * blockIdx.y + threadIdx.y;
// flatten: gx selects a run of N elements, gy the offset inside it
int offset = gx * N + gy;
// guard against threads past the matrix edge
if (gx < N && gy < N){
d_out[offset] = d_a[offset] + d_b[offset];
}
}
// Add two N x N matrices on the GPU and print the sum of all elements.
int main(){
// `static` keeps the three ~1 MB matrices out of the stack frame; the
// original declared them as locals (~3 MB), risking stack overflow
static int h_a[N][N], h_b[N][N], h_out[N][N];
// Declare device/GPU memory pointers
int *d_a, *d_b, *d_out;
// Memory size in bytes per matrix
int size = N * N * sizeof(int);
// Initialize matrices on host
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
h_a[i][j] = i + j;
h_b[i][j] = i * j;
}
}
// Allocate GPU memory
cudaMalloc((void **) &d_a, size);
cudaMalloc((void **) &d_b, size);
cudaMalloc((void **) &d_out, size);
// Transfer input matrices from host to device
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
// Grid dimensions via integer ceil-division; the original's
// (int)ceil(N/blockSize.x) truncated BEFORE the ceil (harmless only
// because BLOCK_DIM divides N exactly)
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
// Launch the kernel
matrixAdd<<<gridSize, blockSize>>>(d_a, d_b, d_out);
// Copy the result from device to the host (synchronizes with the kernel)
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
// 64-bit accumulator: the true sum (~1.7e10) overflows a 32-bit int
long long total = 0;
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
total += h_out[i][j];
}
}
std::cout << "Total: " << total << std::endl;
// Free GPU memory allocation
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.