Prompt
stringlengths 10
31k
| Chosen
stringlengths 3
29.4k
| Rejected
stringlengths 3
51.1k
| Title
stringlengths 9
150
| Tags
listlengths 3
7
|
|---|---|---|---|---|
I am trying to write a query which calculates the difference between the value rows as a new column called difference when the datetime field is in ascending order.
For example, 2016-03-02 should be 102340624 - 102269208
```
select datetime, tagname, value
from runtime.dbo.AnalogHistory
where datetime between '20160301 00:00' and '20160401 00:00'
and TagName = 'EWS_A3_PQM.3P_REAL_U'
and wwResolution = (1440 * 60000)
order by DateTime asc
DATETIME TAGNAME VALUE DIFFERENCE
2016-03-01 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102269208
2016-03-02 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102340624
2016-03-03 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102411568
2016-03-04 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102478104
2016-03-05 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102549088
2016-03-06 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102612592
2016-03-07 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102682984
2016-03-08 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102747000
2016-03-09 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102817176
2016-03-10 00:00:00.0000000 EWS_A3_PQM.3P_REAL_U 102887896
```
Thank you very much in advance
|
You can use the lag function to get the previous rows value.
```
Select datetime, tagname, value, value- coalesce(lag(value) over(partition by tagname order by datetime),0) [difference]
from runtime.dbo.AnalogHistory
where datetime between '20160301 00:00' and '20160401 00:00'
and TagName = 'EWS_A3_PQM.3P_REAL_U'
and wwResolution = (1440 * 60000)
order by DateTime asc
```
|
For sql server versions 2005 or higher but before 2012 (where you don't have lag and lead functions)
```
;with cte as
(
select datetime, tagname, value
from runtime.dbo.AnalogHistory
where datetime between '20160301 00:00' and '20160401 00:00'
and TagName = 'EWS_A3_PQM.3P_REAL_U'
and wwResolution = (1440 * 60000)
)
select datetime, tagname, value, value - isnull((select top 1 value from cte t2 where t2.datetime < t1.datetime order by t2.datetime desc), 0) as difference
from cte t1
order by DateTime
```
For sql server 2012 or higher:
```
select datetime, tagname, value, value - isnull(lag(value) over (order by datetime), 0)
from runtime.dbo.AnalogHistory
where datetime between '20160301 00:00' and '20160401 00:00'
and TagName = 'EWS_A3_PQM.3P_REAL_U'
and wwResolution = (1440 * 60000)
order by DateTime
```
|
Difference between two rows with date in ascending order
|
[
"",
"sql",
"sql-server",
"t-sql",
""
] |
I'm working on JOIN statements (implicit) and I've set up the code to join without much of a hitch, and when the code runs I get quite a few duplicates per person. I was wondering what kind of statement I should use to only show one of each person?
**Select Statement**
```
SELECT CONCAT(customers.customer_first_name, ' ', customers.customer_last_name) as 'Customer',
orders.order_date as 'Order Date', customers.customer_zip as 'Zipcode'
FROM customers, orders, order_details
WHERE order_details.item_id = 10
ORDER BY CONCAT(customers.customer_first_name, ' ', customers.customer_last_name) ASC;
```
If you need other parts of the code, they are readily available.
Create Table/Insert Statements:
```
/*Create Tables*/
CREATE TABLE customers
(
customer_id INT,
customer_first_name VARCHAR(20),
customer_last_name VARCHAR(20) NOT NULL,
customer_address VARCHAR(50) NOT NULL,
customer_city VARCHAR(20) NOT NULL,
customer_state CHAR(2) NOT NULL,
customer_zip CHAR(5) NOT NULL,
customer_phone CHAR(10) NOT NULL,
customer_fax CHAR(10),
CONSTRAINT customers_pk
PRIMARY KEY (customer_id)
);
CREATE TABLE artists
(
artist_id INT NOT NULL,
artist_name VARCHAR(30),
CONSTRAINT artist_pk
PRIMARY KEY (artist_id)
);
CREATE TABLE items
(
item_id INT NOT NULL,
title VARCHAR(50) NOT NULL,
artist_id INT NOT NULL,
unit_price DECIMAL(9,2) NOT NULL,
CONSTRAINT items_pk
PRIMARY KEY (item_id),
CONSTRAINT items_fk_artists
FOREIGN KEY (artist_id) REFERENCES artists (artist_id)
);
CREATE TABLE employees
(
employee_id INT NOT NULL,
last_name VARCHAR(20) NOT NULL,
first_name VARCHAR(20) NOT NULL,
manager_id INT,
CONSTRAINT employees_pk
PRIMARY KEY (employee_id),
CONSTRAINT emp_fk_mgr FOREIGN KEY (manager_id) REFERENCES employees(employee_id)
);
CREATE TABLE orders
(
order_id INT NOT NULL,
customer_id INT NOT NULL,
order_date DATE NOT NULL,
shipped_date DATE,
employee_id INT,
CONSTRAINT orders_pk
PRIMARY KEY (order_id),
CONSTRAINT orders_fk_customers
FOREIGN KEY (customer_id) REFERENCES customers (customer_id),
CONSTRAINT orders_fk_employees
FOREIGN KEY (employee_id) REFERENCES employees (employee_id)
);
CREATE TABLE order_details
(
order_id INT NOT NULL,
item_id INT NOT NULL,
order_qty INT NOT NULL,
CONSTRAINT order_details_pk
PRIMARY KEY (order_id, item_id),
CONSTRAINT order_details_fk_orders
FOREIGN KEY (order_id)
REFERENCES orders (order_id),
CONSTRAINT order_details_fk_items
FOREIGN KEY (item_id)
REFERENCES items (item_id)
);
/*Insert Statements*/
INSERT INTO customers VALUES
(1,'Korah','Blanca','1555 W Lane Ave','Columbus','OH','43221','6145554435','6145553928'),
(2,'Yash','Randall','11 E Rancho Madera Rd','Madison','WI','53707','2095551205','2095552262'),
(3,'Johnathon','Millerton','60 Madison Ave','New York','NY','10010','2125554800',NULL),
(4,'Mikayla','Davis','2021 K Street Nw','Washington','DC','20006','2025555561',NULL),
(5,'Kendall','Mayte','4775 E Miami River Rd','Cleves','OH','45002','5135553043',NULL),
(6,'Kaitlin','Hostlery','3250 Spring Grove Ave','Cincinnati','OH','45225','8005551957','8005552826'),
(7,'Derek','Chaddick','9022 E Merchant Wy','Fairfield','IA','52556','5155556130',NULL),
(8,'Deborah','Davis','415 E Olive Ave','Fresno','CA','93728','5595558060',NULL),
(9,'Karina','Lacy','882 W Easton Wy','Los Angeles','CA','90084','8005557000',NULL),
(10,'Kurt','Nickalus','28210N Avenue Stanford','Valencia','CA','91355','8055550584','055556689'),
(11,'Kelsey','Eulalia','7833 N Ridge Rd','Sacramento','CA','95887','2095557500','2095551302'),
(12,'Anders','Rohansen','12345 E 67th Ave NW','Takoma Park','MD','24512','3385556772',NULL),
(13,'Thalia','Neftaly','2508 W Shaw Ave','Fresno','CA','93711','5595556245',NULL),
(14,'Gonzalo','Keeton','12 Daniel Road','Fairfield','NJ','07004','2015559742',NULL),
(15,'Ania','Irvin','1099 N Farcourt St','Orange','CA','92807','7145559000',NULL),
(16,'Dakota','Baylee','1033 NSycamore Ave.','Los Angeles','CA','90038','2135554322',NULL),
(17,'Samuel','Jacobsen','3433 E Widget Ave','Palo Alto','CA','92711','4155553434',NULL),
(18,'Justin','Javen','828 S Broadway','Tarrytown','NY','10591','8005550037',NULL),
(19,'Kyle','Marissa','789 E Mercy Ave','Phoenix','AZ','85038','9475553900',NULL),
(20,'Erick','Kaleigh','Five Lakepointe Plaza, Ste 500','Charlotte','NC','28217','7045553500',NULL),
(21,'Marvin','Quintin','2677 Industrial Circle Dr','Columbus','OH','43260','6145558600','6145557580'),
(22,'Rashad','Holbrooke','3467 W Shaw Ave #103','Fresno','CA','93711','5595558625','5595558495'),
(23,'Trisha','Anum','627 Aviation Way','Manhatttan Beach','CA','90266','3105552732',NULL),
(24,'Julian','Carson','372 San Quentin','San Francisco','CA','94161','6175550700',NULL),
(25,'Kirsten','Story','2401 Wisconsin Ave NW','Washington','DC','20559','2065559115',NULL);
INSERT INTO artists(artist_id,artist_name) VALUES
(10,'Umani'),
(11,'The Ubernerds'),
(12,'No Rest For The Weary'),
(13,'Burt Ruggles'),
(14,'Sewed the Vest Pocket'),
(15,'Jess & Odie'),
(16,'Onn & Onn');
INSERT INTO items (item_id,title,artist_id,unit_price) VALUES
(1,'Umami In Concert',10,17.95),
(2,'Race Car Sounds',11,13),
(3,'No Rest For The Weary',12,16.95),
(4,'More Songs About Structures and Comestibles',12,17.95),
(5,'On The Road With Burt Ruggles',13,17.5),
(6,'No Fixed Address',14,16.95),
(7,'Rude Noises',15,13),
(8,'Burt Ruggles: An Intimate Portrait',13,17.95),
(9,'Zone Out With Umami',10,16.95),
(10,'Etcetera',16,17);
INSERT INTO employees VALUES
(1,'Smith','Cindy',null),
(2,'Jones','Elmer',1),
(3,'Simonian','Ralph',2),
(9,'Locario','Paulo',1),
(8,'Leary','Rhea',9),
(4,'Hernandez','Olivia',9),
(5,'Aaronsen','Robert',4),
(6,'Watson','Denise',8),
(7,'Hardy','Thomas',2);
INSERT INTO orders VALUES
(19,1,'2012-10-23','2012-10-28',6),
(29,8,'2012-11-05','2012-11-11',6),
(32,11,'2012-11-10','2012-11-13',NULL),
(45,2,'2012-11-25','2012-11-30',NULL),
(70,10,'2012-12-28','2013-01-07',5),
(89,22,'2013-01-20','2013-01-22',7),
(97,20,'2013-01-29','2013-02-02',5),
(118,3,'2013-02-24','2013-02-28',7),
(144,17,'2013-03-21','2013-03-29',NULL),
(158,9,'2013-04-04','2013-04-20',NULL),
(165,14,'2013-04-11','2013-04-13',NULL),
(180,24,'2013-04-25','2013-05-30',NULL),
(231,15,'2013-06-14','2013-06-22',NULL),
(242,23,'2013-06-24','2013-07-06',3),
(264,9,'2013-07-15','2013-07-18',6),
(298,18,'2013-08-18','2013-09-22',3),
(321,2,'2013-09-09','2013-10-05',6),
(381,7,'2013-11-08','2013-11-16',7),
(413,17,'2013-12-05','2014-01-11',7),
(442,5,'2013-12-28','2014-01-03',5),
(479,1,'2014-01-30','2014-03-03',3),
(491,16,'2014-02-08','2014-02-14',5),
(523,3,'2014-03-07','2014-03-15',3),
(548,2,'2014-03-22','2014-04-18',NULL),
(550,17,'2014-03-23','2014-04-03',NULL),
(601,16,'2014-04-21','2014-04-27',NULL),
(607,20,'2014-04-25','2014-05-04',NULL),
(624,2,'2014-05-04','2014-05-09',NULL),
(627,17,'2014-05-05','2014-05-10',NULL),
(630,20,'2014-05-08','2014-05-18',7),
(651,12,'2014-05-19','2014-06-02',7),
(658,12,'2014-05-23','2014-06-02',7),
(687,17,'2014-06-05','2014-06-08',NULL),
(693,9,'2014-06-07','2014-06-19',NULL),
(703,19,'2014-06-12','2014-06-19',7),
(778,13,'2014-07-12','2014-07-21',7),
(796,17,'2014-07-19','2014-07-26',5),
(800,19,'2014-07-21','2014-07-28',NULL),
(802,2,'2014-07-21','2014-07-31',NULL),
(824,1,'2014-08-01',NULL,NULL),
(827,18,'2014-08-02',NULL,NULL),
(829,9,'2014-08-02',NULL,NULL);
INSERT INTO order_details VALUES
(381,1,1),
(601,9,1),
(442,1,1),
(523,9,1),
(630,5,1),
(778,1,1),
(693,10,1),
(118,1,1),
(264,7,1),
(607,10,1),
(624,7,1),
(658,1,1),
(800,5,1),
(158,3,1),
(321,10,1),
(687,6,1),
(827,6,1),
(144,3,1),
(479,1,2),
(630,6,2),
(796,5,1),
(97,4,1),
(601,5,1),
(800,1,1),
(29,10,1),
(70,1,1),
(165,4,1),
(180,4,1),
(231,10,1),
(413,10,1),
(491,6,1),
(607,3,1),
(651,3,1),
(703,4,1),
(802,3,1),
(824,7,2),
(829,1,1),
(550,4,1),
(796,7,1),
(693,6,1),
(29,3,1),
(32,7,1),
(242,1,1),
(298,1,1),
(479,4,1),
(548,9,1),
(627,9,1),
(778,3,1),
(19,5,1),
(89,4,1),
(242,6,1),
(264,4,1),
(550,1,1),
(693,7,3),
(824,3,1),
(829,5,1),
(829,9,1);
```
|
You have a Cartesian product, not a `join`. You could use the `distinct` keyword or you could do a `group by`, but it seems you really need a `join` instead. I am writing something like that for you, but since I do not know your `table` structure, I will be guessing the columns:
```
SELECT CONCAT(customers.customer_first_name, ' ', customers.customer_last_name) as 'Customer',
orders.order_date as 'Order Date', customers.customer_zip as 'Zipcode'
FROM order_details
join orders
on order_details.order_id = orders.order_id and order_details.item_id = 10
join customers
on orders.customer_id = customers.customer_id
ORDER BY CONCAT(customers.customer_first_name, ' ', customers.customer_last_name) ASC;
```
Naturally, there is no guarantee there will be a single record per customer, since we, at least lacking information cannot assume that there are no customers who have multiple orders, each having an order detail with item\_id = 10
|
If you don't want to use the `JOIN` keyword you can add the key columns on which the tables are related to the `WHERE` clause like this:
```
SELECT CONCAT(customers.customer_first_name, ' ', customers.customer_last_name) as 'Customer',
orders.order_date as 'Order Date', customers.customer_zip as 'Zipcode'
FROM customers, orders, order_details
WHERE order_details.item_id = 10
AND orders.customer_id = customers.customer_id
AND order_details.order_id = orders.order_id
ORDER BY CONCAT(customers.customer_first_name, ' ', customers.customer_last_name) ASC;
```
|
MySQL: Implicit Join with conditions: What kind of statement would I need for duplicate removal?
|
[
"",
"mysql",
"sql",
"join",
"duplicates",
"implicit",
""
] |
How can I write a SQL query to do below function. I have a column values with underscore `"_"`. I want to split these values by underscore `"_"` to create two new columns named pID, nID and keep original ID column intact.
```
Input example Output example
ID | | pID | nID |
1234_591856 | ==> | 1234 | 591856 |
12547_15795 | | 12547| 15795 |
12_185666 | | 12 | 185666 |
```
|
If you want to add two new columns to your table you could use ALTER TABLE:
```
alter table mytable
add column pid varchar(100),
add column nid varchar(100);
```
then you can update the value of the newly created columns:
```
update mytable
set
pid=substring_index(id, '_', 1),
nid=substring_index(id, '_', -1)
where
id like '%\_%'
```
|
You can get by below query-
```
SELECT
SUBSTRING_INDEX(mycol,'_',1),
SUBSTRING_INDEX(mycol,'_',-1)
FROM mytable;
```
|
How can I split a value into multiple columns in mySQL
|
[
"",
"mysql",
"sql",
""
] |
Using Rails. I have the following code:
```
class TypeOfBlock < ActiveRecord::Base
has_and_belongs_to_many :patients
end
class Patient < ActiveRecord::Base
has_and_belongs_to_many :type_of_blocks, dependent: :destroy
end
```
With these sets of tables:
```
ββββββββββββββββ
βtype_of_blocksβ
β βββββββ¦ββββββββ£
β id β name β
β βββββββ¬ββββββββ£
β 1 β UP β
β 2 β LL β
β 3 β T β
ββββββββ©ββββββββ
βββββββββββββββββββββββββββββββββ
β patients_type_of_blocks β
β βββββββββββββββββββ¦βββββββββββββ£
β type_of_block_id β patient_id β
β βββββββββββββββββββ¬βββββββββββββ£
β 1 β 1 β
β 1 β 2 β
β 2 β 2 β
β 3 β 3 β
β 2 β 4 β
β 1 β 5 β
β 1 β 6 β
β 2 β 6 β
β 3 β 6 β
ββββββββββββββββββββ©βββββββββββββ
```
I want to count the number of unique patient depends on type of block combination, here's the expected result:
```
# Expected results (just like a truth table)
UP (patient with type_of_block_id 1 only) = 2 patient
UP + LL (patient with type_of_block_ids 1 and 2) = 1 patient
UP + T (patient with type_of_block_ids 1 and 3) = 0 patient
LL (patient with type_of_block_id 2 only) = 1 patient
LL + T (patient with type_of_block_ids 2 and 3) = 0 patient
T (patient with type_of_block_id 3 only) = 1 patient
UP + LL + T (patient with type_of_block_ids 1, 2 and 3) = 1 patient
```
I've tried to join the tables like below:
```
up_ll =
TypeOfBlock.
joins("join patients_type_of_blocks on patients_type_of_blocks.type_of_block_id = type_of_blocks.id").
where("patients_type_of_blocks.type_of_block_id = 1 and patients_type_of_blocks.type_of_block_id = 2").
size
```
But there are just too much complexity, and the number is wrong. I wanted to try raw SQL, but Rails 4 deprecated it and requires me to do `ModelClass.find_by_sql`.
How can I generate the above expected results?
|
The only solution that comes to my mind is to use raw SQL and leverage the **[`group_concat` function](http://dev.mysql.com/doc/refman/5.7/en/group-by-functions.html#function_group-concat)**, as shown [here](https://stackoverflow.com/a/11640438/1544012).
The SQL needed is this:
```
SELECT
combination,
count(*) as cnt
FROM (
SELECT
ptb.patient_id,
group_concat(tb.name ORDER BY tb.name) AS combination
FROM type_of_blocks tb
INNER JOIN patients_type_of_blocks ptb ON ptb.type_of_block_id = tb.id
GROUP BY ptb.patient_id) patient_combinations
GROUP BY combination;
```
The inner select groups by the patients and selects the combinations of block types each of the patient has. The outer select then simply counts the patients in each combination.
The query returns the following (see the [SQL fiddle](http://sqlfiddle.com/#!9/e2de08/3/0)):
```
combination cnt
LL 1
LL,T,UP 1
LL,UP 1
T 1
UP 2
```
As you can see, the query does not return zero counts, this has to be solved in ruby code (perhaps initialize a hash with all combinations with zeroes and then merge with the query counts).
To integrate this query to ruby, simply use the `find_by_sql` method on any model (and for example convert the results to a hash):
```
sql = <<-EOF
...the query from above...
EOF
TypeOfBlock.find_by_sql(sql).to_a.reduce({}) { |h, u| h[u.combination] = u.cnt; h }
# => { "LL" => 1, "LL,T,UP" => 1, "LL,UP" => 1, "T" => 1, "UP" => 2 }
```
|
The answer provided by [BoraMa](https://stackoverflow.com/a/36318432/5070879) is correct. I just want to address:
> ***As you can see, the query does not return zero counts,*** this has to be
> solved in ruby code (perhaps initialize a hash with all combinations
> with zeroes and then merge with the query counts).
It could be achieved by using pure MySQL:
```
SELECT sub.combination, COALESCE(cnt, 0) AS cnt
FROM (SELECT GROUP_CONCAT(Name ORDER BY Name SEPARATOR ' + ') AS combination
FROM (SELECT p.Name, p.rn, LPAD(BIN(u.N + t.N * 10), size, '0') bitmap
FROM (SELECT @rownum := @rownum + 1 rn, id, Name
FROM type_of_blocks, (SELECT @rownum := 0) r) p
CROSS JOIN (SELECT 0 N UNION ALL SELECT 1
UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7
UNION ALL SELECT 8 UNION ALL SELECT 9) u
CROSS JOIN (SELECT 0 N UNION ALL SELECT 1
UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7
UNION ALL SELECT 8 UNION ALL SELECT 9) t
CROSS JOIN (SELECT COUNT(*) AS size FROM type_of_blocks) o
WHERE u.N + t.N * 10 < POW(2, size)
) b
WHERE SUBSTRING(bitmap, rn, 1) = '1'
GROUP BY bitmap
) AS sub
LEFT JOIN (
SELECT combination, COUNT(*) AS cnt
FROM (SELECT ptb.patient_id,
GROUP_CONCAT(tb.name ORDER BY tb.name SEPARATOR ' + ') AS combination
FROM type_of_blocks tb
JOIN patients_type_of_blocks ptb
ON ptb.type_of_block_id = tb.id
GROUP BY ptb.patient_id) patient_combinations
GROUP BY combination
) AS sub2
ON sub.combination = sub2.combination
ORDER BY LENGTH(sub.combination), sub.combination;
```
`SQLFiddleDemo`
Output:
```
ββββββββββββββββ¦ββββββ
β combination β cnt β
β βββββββββββββββ¬ββββββ£
β T β 1 β
β LL β 1 β
β UP β 2 β
β LL + T β 0 β
β T + UP β 0 β
β LL + UP β 1 β
β LL + T + UP β 1 β
ββββββββββββββββ©ββββββ
```
How it works:
1. Generate all possible combinations using method described by [Serpiton](https://stackoverflow.com/a/24245457/5070879) (with slight improvements)
2. Calculate available combinations
3. Combine both results
---
To better understand how it works, here is a `Postgresql` version of generating all combinations:
```
WITH all_combinations AS (
SELECT string_agg(b.Name ,' + ' ORDER BY b.Name) AS combination
FROM (SELECT p.Name, p.rn, RIGHT(o.n::bit(16)::text, size) AS bitmap
FROM (SELECT *, ROW_NUMBER() OVER(ORDER BY id)::int AS rn
FROM type_of_blocks )AS p
CROSS JOIN generate_series(1, 100000) AS o(n)
,LATERAL(SELECT COUNT(*)::int AS size FROM type_of_blocks) AS s
WHERE o.n < 2 ^ size
) b
WHERE SUBSTRING(b.bitmap, b.rn, 1) = '1'
GROUP BY b.bitmap
)
SELECT sub.combination, COALESCE(sub2.cnt, 0) AS cnt
FROM all_combinations sub
LEFT JOIN (SELECT combination, COUNT(*) AS cnt
FROM (SELECT ptb.patient_id,
string_agg(tb.name,' + ' ORDER BY tb.name) AS combination
FROM type_of_blocks tb
JOIN patients_type_of_blocks ptb
ON ptb.type_of_block_id = tb.id
GROUP BY ptb.patient_id) patient_combinations
GROUP BY combination) AS sub2
ON sub.combination = sub2.combination
ORDER BY LENGTH(sub.combination), sub.combination;
```
`SqlFiddleDemo2`
|
Rails complex query to count unique records based on truth table
|
[
"",
"mysql",
"sql",
"ruby-on-rails",
""
] |
This is my table
```
date count of subscription per date
---- ----------------------------
21-03-2016 10
22-03-2016 30
23-03-2016 40
```
Please need your help, I need to get the result like below table, summation second row with first row, same thing for another rows:
```
date count of subscription per date
---- ----------------------------
21-03-2016 10
22-03-2016 40
23-03-2016 80
```
|
```
SELECT t.date, (
SELECT SUM(numsubs)
FROM mytable t2
WHERE t2.date <= t.date
) AS cnt
FROM mytable t
```
|
You can do a cumulative sum using the ANSI standard analytic `SUM()` function:
```
select date, sum(numsubs) over (order by date) as cume_numsubs
from t;
```
|
SQL query to calculate sum of row with previous row
|
[
"",
"sql",
"oracle",
""
] |
I'm curious about something in a SQL Server database. My current query pulls data about my employer's items for sale. It finds information for just under 105,000 items, which is correct. However, it returns over 155,000 rows, because each item has other things related to it. Right now, I run that data through a loop in Python, manually flattening it out by checking if the item the loop is working on is the same one it just worked on. If it is, I start filling in that item's extra information. Ideally, the SQL would return all this data already put into one row.
Here is an overview of the setup. I'm leaving out a few details for simplicity's sake, since I'm curious about the general theory, not looking for something I can copy and paste.
Item: contains the item ID, SKU, description, vendor ID, weight, and dimensions.
AttributeName: contains attr\_id and attr\_text. For instance, "color", "size", or "style".
AttributeValue: contains attr\_value\_id and attr\_text. For instance, "blue" or "small".
AttributeAssign: contains item\_id and attr\_id. This ties attribute names to items.
attributeValueAssign: contains item\_id and attr\_value\_id, tying attribute values to items.
A series of attachments is set up in a similar way, but with attachment and attachmentAssignment. Attachments can have only values, no names, so there is no need for the extra complexity of a third table as there is with attributes.
Vendor is simple: the ID is used in the item table. That is:
```
select item_id, vendorName
from item
join vendor on vendor_id = item.vendorNumber
```
gets you the name of an item's vendor.
Now, the fun part: items may or may not have vendors, attributes, or attachments. If they have either of the latter two, there's no way to know how many they have. I've seen items with 0 attributes and items with 5. Attachments are simpler, as there can only be 0 or 1 per item, but the possibility of 0 still demands an outer left join so I am guaranteed to get all the items.
That's how I get multiple rows per item. If an item has three attributes, I get either four or seven rows for just that item--I'm not sure if it's a row per name/value or a row per name AND a row per value. Either way, this is the kind of thing I'd like to stop. I want each row in my result set to contain all attributes, with a cap at seven and null for any missing attribute. That is, something like:
item\_id; item\_title; item\_sku; ... attribute1\_name; attribute1\_value; attribute2\_name; attribute2\_value; ... attribute7\_value
1; some random item; 123-45; ... color; blue; size; medium; ... null
Right now, I'd get multiple rows for that, such as (only ID and attributes):
ID; attribute 1 name; attribute 1 value; attribute 2 name; attribute 2 value
1; color; blue; null; null
1; color; blue; size; medium
I'm after the second row only--all the information put together into one row per unique item ID. Currently, though, I get multiple rows, and Python has to put everything together. I'm outputting this to a spreadsheet, so information about an item has to be on that item's row.
I can just keep using Python if this is too much bother. But I wondered if there was a way to do it that would be relatively easy. My script works fine, and execution time isn't a concern. This is more for my own curiosity than a need to get anything working. Any thoughts on how--or if--this is possible?
|
Since you only want the first 7 attributes and you want to keep all of the logic in the SQL query, you're probably looking at using row\_number. Subqueries will do the job directly with multiple joins, and the performance will probably be pretty good since you're only joining so many times.
```
select
i.item_id,
attr1.name as attribute1_name, attr1.value as attribute1_value,
...
attr7.name as attribute7_name, attr7.value as attribute7_value
from
items i
left join (
select
*, row_number() over(partition by item_id order by name, attribute_id) as row_number
from
attributes
) AS attr1 ON
attr1.item_id = i.item_id
AND attr1.row_number = 1
...
left join (
select
*, row_number() over(partition by item_id order by name, attribute_id) as row_number
from
attributes
) AS attr7 ON
attr7.item_id = i.item_id
AND attr7.row_number = 7
```
|
Here is @WCWedin's answer modified to use a CTE.
```
WITH attrib_rn as
(
select
*, row_number() over(partition by item_id order by name, attribute_id) as row_number
from attributes
)
select
i.item_id,
attr1.name as attribute1_name, attr1.value as attribute1_value,
...
attr7.name as attribute7_name, attr7.value as attribute7_value
from items i
left join attrib_rn as attr1 ON attr1.item_id = i.item_id AND attr1.row_number = 1
left join attrib_rn as attr2 ON attr2.item_id = i.item_id AND attr2.row_number = 2
left join attrib_rn as attr3 ON attr3.item_id = i.item_id AND attr3.row_number = 3
left join attrib_rn as attr4 ON attr4.item_id = i.item_id AND attr4.row_number = 4
left join attrib_rn as attr5 ON attr5.item_id = i.item_id AND attr5.row_number = 5
left join attrib_rn as attr6 ON attr6.item_id = i.item_id AND attr6.row_number = 6
left join attrib_rn as attr7 ON attr7.item_id = i.item_id AND attr7.row_number = 7
```
|
Flatten multiple query results with same ID to single row?
|
[
"",
"sql",
"sql-server",
""
] |
I am trying to find out an ideal way to automatically copy new records from one database to another. the databases have different structure! I achieved it by writing VBS scripts which copy the data from one to another and triggered the scripts from another application which passes arguments to the script. But I faced issues at points where there were more than 100 triggers. i.e. 100 wscript processes trying to access the database and they couldn't complete the task.
I want to find out a simpler solution inside SQL, I read about setting triggers, Stored procedure and running them from SQL agent, replication etc. The requirement is that I have to copy records to another database periodically or when there is a new record into another database.
Which method will suit me the best?
|
You can use [CDC](https://msdn.microsoft.com/en-us/library/bb895315.aspx) to do this activity. Create a SSIS package using CDC and run that package periodically through SQL Server Agent Job. CDC will store all the changes of that table and will do all those changes to the destination table when you run the package. Please follow the below link.
<http://sqlmag.com/sql-server-integration-services/combining-cdc-and-ssis-incremental-data-loads>
|
The word periodically in your question suggests that you should go for Jobs. You can schedule jobs in SQL Server using Sql Server agent and assign a period. The job will run your script as per assigned frequency.
|
SQL: Automatically copy records from one database to another database
|
[
"",
"sql",
"database",
"sql-server-2012",
""
] |
I have a select statement I am trying to make for a report. I have it pulling data and everything I need but I noticed that since I have to use the group by it is dropping off rows that do not exist in a table. How can I stop this or make it work.
```
SELECT Sum(CASE WHEN direction = 'I' THEN 1 ELSE 0 END) InBound,
Sum(CASE WHEN direction = 'O' THEN 1 ELSE 0 END) OutBound,
Sum(CASE WHEN direction = 'I' THEN p.duration ELSE 0 END) InBoundTime,
Sum(CASE WHEN direction = 'O' THEN p.duration ELSE 0 END) OutBoundTime,
u.fullname,
( CASE
WHEN EXISTS (SELECT g.goalamount
FROM [tblbrokergoals] AS g
WHERE ( g.goaldate BETWEEN
'2016-03-21' AND '2016-03-27' ))
THEN
g.goalamount
ELSE 0
END ) AS GoalAmount
FROM [tblphonelogs] AS p
LEFT JOIN [tblusers] AS u
ON u.fullname = p.phonename
LEFT OUTER JOIN [tblbrokergoals] AS g
ON u.fullname = g.brokername
WHERE ( calldatetime BETWEEN '2016-03-21' AND '2016-03-27' )
AND ( u.userid IS NOT NULL )
AND ( u.direxclude <> '11' )
AND u.termdate IS NULL
AND ( g.goaldate BETWEEN '2016-03-21' AND '2016-03-27' )
GROUP BY u.fullname,
g.goalamount;
```
This works and grabs all the data when the user is in BrokerGoals but, when the user is not in broker goals it just deletes that row on the returned result set. How can I get it so when the user doesnt not exist in the brokergoals table to set that value as 0 or -- so the row does not get deleted.
|
```
SELECT u.FullName,
SUM(CASE WHEN Direction = 'I' THEN 1 ELSE 0 END) AS InBound,
SUM(CASE WHEN Direction = 'O' THEN 1 ELSE 0 END) OutBound,
SUM(CASE WHEN Direction = 'I' THEN p.Duration ELSE 0 END) InBoundTime,
SUM(CASE WHEN Direction = 'O' THEN p.Duration ELSE 0 END) OutBoundTime,
CASE WHEN EXISTS (
SELECT g.GoalAmount
FROM [Portal].[dbo].[tblBrokerGoals] AS g
WHERE g.GoalDate BETWEEN '2016-03-21' AND '2016-03-27'
AND u.FullName = g.BrokerName
) THEN (
SELECT g.GoalAmount
FROM [Portal].[dbo].[tblBrokerGoals] AS g
WHERE g.GoalDate BETWEEN '2016-03-21' AND '2016-03-27'
AND u.FullName = g.BrokerName
) ELSE '0' END AS GoalAmount
FROM [Portal].[dbo].[tblUsers] AS u
LEFT JOIN [Portal].[dbo].[tblPhoneLogs] AS p
ON u.FullName = p.PhoneName
WHERE u.UserID IS NOT NULL
AND u.DirExclude <> '11'
AND u.TermDate IS NULL
AND p.CallDateTime BETWEEN '2016-03-21' AND '2016-03-27'
GROUP BY u.FullName
```
This is what I ended up doing to fix my problem. I added the Case When Exists statement and in the then statement did the select else it is 0.
|
If you have a `brokers` table then you can use it for your `left join`
```
SELECT b.broker_id, ....
FROM brokers b
LEFT JOIN .... ALL YOUR OTHER TABLES
....
GROUP BY b.broker_id, ....
```
If your brokers has duplicate names then use
```
SELECT b.broker_id, ....
FROM (SELECT DISTINCT broker_id
FROM brokers) b
LEFT JOIN .... ALL YOUR OTHER TABLES
....
GROUP BY b.broker_id, ....
```
|
Case sql not working
|
[
"",
"sql",
"group-by",
"case",
"exists",
""
] |
I have a table called "`Accounts`" with a `composite primary key` consisting of 2 columns: `Account_key` and `Account_Start_date` both with the datatype `int` and another non key column named `Accountnumber(bigint).`
Account\_key should have one or many `Accountnumber(bigint)` and not the other way around meaning **1 or many Accountnumber can only have 1 Account\_key**.
If you try to insert same Account\_key and same Account\_Start\_date then the `primary key constraint` is stopping this of course because they are together primary key.
However if you insert existing Account\_key with different non existing Account\_Start\_date then you could insert a random Accountnumber as you wish without any constraint complaining about it, **and suddenly you have rows with many to many relations between Account\_key and Accountnumber and we dont want that**.
I have tried with a lot of constrains without any luck. I just don't know what I am doing wrong here so please go ahead and help me on this, thanks!
(Note: I dont think changing the composite primary key is an option because then we will loose the slowly changing dimension date functionality)
There is another table (case) where 1 'Account\_Key' can only be related to 1 'AccountNumber' meaning 1..1 relation, all other things is the same except that there should be 1..1 relation between them.
Unique index havent work for me at least, just consider if I wanted to change `Accounts` table or put a trigger or even a Index so it will be 1..1 relation between 'Account\_Key' and 'AccountNumber', ?
|
If this were an OLTP table the solution would be to properly normalize the data into two tables, but this is a DW table so it makes sense to have it all in one table.
In this case, you should add a `FOR` / `AFTER` Trigger `ON INSERT, UPDATE` that does a query against the `inserted` pseudo-table. The query can be a simple `COUNT(DISTINCT Account_Key)`, joining back to the main table (to filter on just the `AccountNumber` values being added/updated), doing a `GROUP BY` on `AccountNumber` and then `HAVING COUNT(DISTINCT Account_Key) > 1`. Wrap that query in an `IF EXISTS` and if a row is returned, then execute a `ROLLBACK` to cancel the DML operation, a `RAISERROR` to send the error message about why the operation is being cancelled, and then `RETURN`.
```
CREATE TRIGGER dbo.TR_TableName_PreventDuplicateAccountNumbers
ON dbo.TableName
AFTER INSERT, UPDATE
AS
SET NOCOUNT ON;
IF (EXISTS(
SELECT COUNT(DISTINCT tab.Account_Key)
FROM dbo.TableName tab
INNER JOIN INSERTED ins
ON ins.AccountNumber = tab.AccountNumber
GROUP BY tab.AccountNumber
HAVING COUNT(DISTINCT tab.Account_Key) > 1
))
BEGIN
ROLLBACK;
RAISERROR(N'AccountNumber cannot be associated with more than 1 Account_Key', 16, 1);
RETURN;
END;
```
For the "other" table where the relationship between `Account_Key` and `AccountNumber` is 1:1, you might could try doing something like:
```
-- Detects a violation of the 1:1 Account_Key <-> AccountNumber rule inside a trigger.
-- @Found is set to 1 when any key or number being inserted/updated would end up
-- paired with more than one counterpart.
DECLARE @Found BIT = 0;
-- cte: distinct (key, number) pairs in the table that share a key OR a number
-- with any row in the INSERTED pseudo-table.
;WITH cte AS
(
SELECT DISTINCT tab.Account_Key, tab.AccountNumber
FROM dbo.TableName tab
INNER JOIN INSERTED ins
ON ins.Account_Key = tab.Account_Key
OR ins.AccountNumber = tab.AccountNumber
), counts AS
(
-- Number the pairs within each key and within each number; a second row
-- (row_number > 1) on either side means the mapping is no longer 1:1.
-- (Fixed: the original had unbalanced [ ] delimiters in the OVER clauses.)
SELECT c.[Account_Key],
c.[AccountNumber],
ROW_NUMBER() OVER (PARTITION BY c.[Account_Key]
ORDER BY c.[Account_Key], c.[AccountNumber]) AS [KeyCount],
ROW_NUMBER() OVER (PARTITION BY c.[AccountNumber]
ORDER BY c.[AccountNumber], c.[Account_Key]) AS [NumberCount]
FROM cte c
)
SELECT @Found = 1
FROM counts
WHERE [KeyCount] > 1
OR [NumberCount] > 1;
IF (@Found = 1)
BEGIN
-- Cancel the DML statement and report why.
ROLLBACK;
RAISERROR(N'AccountNumber cannot be associated with more than 1 Account_Key', 16, 1);
RETURN;
END;
```
|
If I understand you correctly, you want:
1. Any given AccountNumber can only be related to one AccountKey
2. Any given AccountKey can be related to multiple AccountNumbers
If this is correct, you can achieve this with a `CHECK CONSTRAINT` that calls a UDF.
EDIT:
Psuedo-logic for the CHECK CONSTRAINT could be:
```
IF EXISTS anotherRow
WHERE theOtherAccountNumber = thisAccountNumber
AND theOtherAccountKey <> thisAccountKey
THEN False (do not allow this row to be inserted)
ELSE True (allow the insertion)
```
I would put this logic in a UDF that returns true or false to make the CHECK constraint simpler.
|
Enforcing 1:1 and 1:Many cardinality in denormalized warehouse table with composite Primary Key
|
[
"",
"sql",
"sql-server",
"database",
"t-sql",
"data-warehouse",
""
] |
I'm trying to figure out away to split the first 100,000 records from a table that has 1 million+ records into 5 (five) 20,000 records chunks to go into a file?
Maybe some SQL that will get the min and max rowid or primary id for each 5 chunks of 20,000 records, so I can put the min and max value into a variable and pass it into the SQL and use a BETWEEN in the where clause to the SQL.
Can this be done?
I'm on an Oracle 11g database.
Thanks in advance.
|
If you just want to assign values 1-5 to basically equal sized groups, then use `ntile()`:
```
select t.*, ntile(5) over (order by NULL) as num
from (select t.*
from t
where rownum <= 100000
) t;
```
If you want to insert into 5 different tables, then use `insert all`:
```
insert all
when num = 1 then into t1
when num = 2 then into t2
when num = 3 then into t3
when num = 4 then into t4
when num = 5 then into t5
select t.*, ntile(5) over (order by NULL) as num
from (select t.*
from t
where rownum <= 100000
) t;
```
|
A bit harsh down voting another fair question.
Anyway, NTILE is new to me, so I wouldn't have discovered that were it not for your question.
My way of doing this , the old school way, would have been to MOD the rownum to get the group number, e.g.
```
select t.*, mod(rn,5) as num
from (select t.*, rownum rn
from t
) t;
```
This solves the SQL part, or rather how to group rows into equal chunks, but that is only half your question. The next half is how to write these to 5 separate files.
You can either have 5 separate queries each spooling to a separate file, e.g:
```
spool f1.dat
select t.*
from (select t.*, rownum rn
from t
) t
where mod(t.rn,5) = 0;
spool off
spool f2.dat
select t.*
from (select t.*, rownum rn
from t
) t
where mod(t.rn,5) = 1;
spool off
```
etc.
Or, using UTL\_FILE. You could try something clever with a single query and have an array of UTL\_FILE types where the array index matches the MOD(rn,5) then you wouldn't need logic like "IF rn = 0 THEN UTL\_FILE.WRITELN(f0, ...".
So, something like (not tested, just in a rough form for guidance, never tried this myself):
```
DECLARE
TYPE fname IS VARRAY(5) OF VARCHAR2(100);
TYPE fh IS VARRAY(5) OF UTL_FILE.FILE_TYPE;
CURSOR c1 IS
select t.*, mod(rn,5) as num
from (select t.*, rownum rn
from t
) t;
idx INTEGER;
BEGIN
FOR idx IN 1..5 LOOP
fname(idx) := 'data_' || idx || '.dat';
fh(idx) := UTL_FILE.FOPEN('THE_DIR', fname(idx), 'w');
END LOOP;
FOR r1 IN c1 LOOP
UTL_FILE.PUT_LINE ( fh(r1.num+1), r1.{column value from C1} );
END LOOP;
FOR idx IN 1..5 LOOP
UTL_FILE.FCLOSE (fh(idx));
END LOOP;
END;
```
|
SQL: How would you split a 100,000 records from a Oracle table into 5 chunks?
|
[
"",
"sql",
"database",
"oracle",
"max",
"min",
""
] |
I have query like this
```
SELECT
a.STOCK_ITEM_NO, a.STOCK_BEG_QTY, b.DOUT_QTY_ISSUE
FROM
INV_STOCK AS a
LEFT OUTER JOIN INV_DOUT AS b
ON a.STOCK_ITEM_NO = b.DOUT_ITEM_NO
WHERE a.STOCK_ITEM_NO = 'ABC01'
AND b.CREATEDDATE > '01-MAR-2016'
AND b.CREATEDDATE < '01-APR-2016'
```
There is `null` value when
```
select dout_qty_issue from inv_dout where CREATEDDATE > '01-MAR-2016' and CREATEDDATE < '01-APR-2016'
```
Because there is no data
So the result from the query is empty, as it fail to join (I think)
but can it return ?
```
'ABC01' | 10 | 0
```
Because now the query return
```
null | null| null
```
|
Put your date WHERE statement inside the LEFT JOIN.
```
SELECT a.STOCK_ITEM_NO,a.STOCK_BEG_QTY, b.DOUT_QTY_ISSUE
FROM INV_STOCK a
LEFT OUTER JOIN INV_DOUT b ON a.STOCK_ITEM_NO = b.DOUT_ITEM_NO
AND b.CREATEDDATE BETWEEN '01-MAR-2016' AND '01-APR-2016'
WHERE a.STOCK_ITEM_NO = 'ABC01'
```
|
```
SELECT a.STOCK_ITEM_NO
,a.STOCK_BEG_QTY
,ISNULL(b.DOUT_QTY_ISSUE,0) as DOUT_QTY_ISSUE
FROM INV_STOCK AS a
LEFT JOIN INV_DOUT AS b
ON a.STOCK_ITEM_NO = b.DOUT_ITEM_NO
AND b.CREATEDDATE > '01-MAR-2016'
AND b.CREATEDDATE < '01-APR-2016'
WHERE a.STOCK_ITEM_NO = 'ABC01'
```
Put the where condition to Join, you can display what you wanted.
|
SQL Left Join with null result return 0
|
[
"",
"sql",
"sql-server",
""
] |
I want to convert the decimal number 3562.45 to 356245, either as an `int` or a `varchar`. I am using `cast(3562.45 as int)`, but it only returns 3562. How do I do it?
|
Or you can replace the decimal point.
```
select cast(replace('3562.45', '.','') as integer)
```
This way, it doesn't matter how many decimal places you have.
|
How about the obvious:
```
CAST(3562.45*100 as INTEGER)
```
|
Convert decimal number to INT SQL
|
[
"",
"sql",
"sql-server",
"casting",
"decimal",
"number-formatting",
""
] |
I'm having an issue with SQL joins in a query that is designed to query the Post table having been joined to the comment, click and vote table and return stats about each posts activity. My query below is what I've been using.
```
SELECT
p.PostID,
p.Title,
CASE
WHEN COUNT(cm.CommentID) IS NULL THEN 0
ELSE COUNT(cm.CommentID)
END AS CommentCount,
CASE
WHEN COUNT(cl.ClickID) IS NULL THEN 0
ELSE COUNT(cl.ClickID)
END AS ClickCount,
CASE
WHEN SUM(vt.Value) IS NULL THEN 0
ELSE SUM(vt.Value)
END AS VoteScore
FROM
Post p
LEFT OUTER JOIN Comment cm ON p.PostID = cm.PostID
LEFT OUTER JOIN Click cl ON p.PostID = cl.PostID
LEFT OUTER JOIN Vote vt ON p.PostID = vt.PostID
GROUP BY
p.PostID,
p.Title
```
Yields the following result
```
| PostID | CommentCount | ClickCount | VoteScore |
|--------|--------------|------------|-----------|
| 41 | 60| 60| 60|
| 50 | 1683| 1683| 1683|
```
This, I know isn't correct. When comment out all but one of the joins:
```
SELECT
p.PostID
,p.Title
,CASE
WHEN COUNT(cm.CommentID) IS NULL THEN 0
ELSE COUNT(cm.CommentID)
END AS CommentCount
/*
,CASE
WHEN COUNT(cl.ClickID) IS NULL THEN 0
ELSE COUNT(cl.ClickID)
END AS ClickCount
,CASE
WHEN SUM(vt.Value) IS NULL THEN 0
ELSE SUM(vt.Value)
END AS VoteScore
*/
FROM
Post p
LEFT OUTER JOIN Comment cm ON p.PostID = cm.PostID
/*
LEFT OUTER JOIN Click cl ON p.PostID = cl.PostID
LEFT OUTER JOIN Vote vt ON p.PostID = vt.PostID
*/
GROUP BY
p.PostID,
p.Title
```
I get
```
| PostID | CommentCount |
|--------|--------------|
| 41 | 3|
```
Which is correct. Any ideas what I've done wrong?
Thanks.
|
The result that is being returned is expected because the query is producing a Cartesian (or semi-Cartesian) product. The query is basically telling MySQL to perform "cross join" operations on the rows returned from `comment`, `click` and `vote`.
Each row returned from `comment` (for a given postid) gets matched to each row from `click` (for the same postid). And then each of the rows in that result gets matched to each row from `vote` (for the same postid).
So, for two rows from `comment`, and three rows from `click` and four rows from `vote`, that will return a total of 24 (=2x3x4) rows.
The usual pattern for fixing this is to avoid the cross join operations.
There are a couple of approaches to do that.
---
**correlated subqueries in select list**
If you only need a single aggregate (e.g. COUNT or SUM) from each of the three tables, you could remove the joins, and use correlated subqueries in the SELECT list. Write a query that gets a count for a single postid, for example
```
SELECT COUNT(1)
FROM comment cmt
WHERE cmt.postid = ?
```
Then wrap that query in parens, and reference it in the SELECT list of another query, and replace the question mark to a reference to postid from the table referenced in the outer query.
```
SELECT p.postid
, ( SELECT COUNT(1)
FROM comment cmt
WHERE cmt.postid = p.postid
) AS comment_count
FROM post p
```
Repeat the same pattern to get "counts" from `click` and `vote`.
The downside of this approach is that the subquery in the SELECT list will get executed for *each* row returned by the outer query. So this can get expensive if the outer query returns a lot of rows. If `comment` is a large table, then to get reasonable performance, it's critical that there's appropriate index available on `comment`.
---
**pre-aggregate in inline views**
Another approach is to "pre-aggregate" the results inline views. Write a query that returns the comment count for postid. For example
```
SELECT cmt.postid
, COUNT(1)
FROM comment cmt
GROUP BY cmt.postid
```
Wrap that query in parens and reference it in the FROM clause of another query, assign an alias. The inline view query basically takes the place of a table in the outer query.
```
SELECT p.postid
, cm.postid
, cm.comment_count
FROM post p
LEFT
JOIN ( SELECT cmt.postid
, COUNT(1) AS comment_count
FROM comment cmt
GROUP BY cmt.postid
) cm
ON cm.postid = p.postid
```
And repeat that same pattern for `click` and `vote`. The trick here is the GROUP BY clause in the inline view query that guarantees that it won't return any duplicate postid values. And a cartesian product (cross join) to that won't produce duplicates.
The downside of this approach is that the derived table won't be indexed. So for a large number of postid, it may be expensive to perform the join in the outer query. (More recent versions of MySQL partially address this downside, by automatically creating an appropriate index.)
(We can workaround this limitation by creating a temporary able with an appropriate index. But this approach requires additional SQL statements, and is not entirely suitable for an adhoc single statement. But for batch processing of large sets, the additional complexity can be worth it for some significant performance gains.
---
**collapse Cartesian product by DISTINCT values**
As an entirely different approach, leave your query like it is, with the cross join operations, and allow MySQL to produce the Cartesian product. Then the aggregates in the SELECT list can filter out the duplicates. This requires that you have a column (or expression produced) from `comment` that is UNIQUE for each row in comment for a given postid.
```
SELECT p.postid
, COUNT(DISTINCT c.id) AS comment_count
FROM post p
LEFT
JOIN comment c
ON c.postid = p.postid
GROUP BY p.postid
```
The big downside of this approach is that it has the potential to produce a *huge* intermediate result, which is then "collapsed" with a "Using filesort" operation (to satisfy the GROUP BY). And this can be pretty expensive for large sets.
---
This isn't an exhaustive list of all possible query patterns to achieve the result you are looking to return. Just a representative sampling.
|
You probably want something like this:
```
SELECT
p.PostID,
p.Title,
(SELECT COUNT(*) FROM Comment cm
WHERE cm.PostID = p.PostID) AS CommentCount,
(SELECT COUNT(*) FROM Click cl
WHERE p.PostID = cl.PostID) AS ClickCount ,
(SELECT SUM(vt.Value) FROM Vote vt
WHERE p.PostID = vt.PostID) AS VoteScore
FROM
Post p
```
The problem with your query is that the second and third `LEFT JOIN` operations duplicate records: after the first `LEFT JOIN` has been applied you have, for example 3, records for post having `PostID = 41`. The second `LEFT JOIN` now joins to these 3 records, so `PostID = 41` is used **3 times** in the second `LEFT JOIN`.
If there is a 1:many relationship *directly* between (`Post`, `Comment`), (`Post`, `Click`) and (`Post`, `Vote`), then the above query will most probably give you what you want.
|
SQL query returns same value in each column
|
[
"",
"sql",
"sql-server",
"left-join",
""
] |
I have three tables. One consists of customers, one consists of products they have purchased and the last one of the returns they have done:
Table customer
```
CustID, Name
1, Tom
2, Lisa
3, Fred
```
Table product
```
CustID, Item
1, Toaster
1, Breadbox
2, Toaster
3, Toaster
```
Table Returns
```
CustID, Date, Reason
1, 2014, Guarantee
2, 2013, Guarantee
2, 2014, Guarantee
3, 2015, Guarantee
```
I would like to get all the customers that bought a Toaster, unless they also bought a breadbox, but not if they have returned a product more than once.
So I have tried the following:
```
SELECT * FROM Customer
LEFT JOIN Product ON Customer.CustID=Product.CustID
LEFT JOIN Returns ON Customer.CustID=Returns.CustID
WHERE Item = 'Toaster'
AND Customer.CustID NOT IN (
Select CustID FROM Product Where Item = 'Breadbox'
)
```
That gives me the ones that have bought a Toaster but not a breadbox. Hence, Lisa and Fred.
But I suspect Lisa to break the products on purpose, so I do not want to include the ones that have returned a product more than once. Hence, what do I add to the statement to only get Freds information?
|
How about
```
SELECT * FROM Customer
LEFT JOIN Product ON Customer.CustID=Product.CustID
WHERE Item = 'Toaster'
AND Customer.CustID NOT IN (
Select CustID FROM Product Where Item = 'Breadbox'
)
AND (SELECT COUNT(*) FROM Returns WHERE Customer.CustId = Returns.CustID) <= 1
```
|
The filter condition goes in the `ON` clause for all but the first table (in a series of `LEFT JOIN`:
```
SELECT *
FROM Customer c LEFT JOIN
Product p
ON c.CustID = p.CustID AND p.Item = 'Toaster' LEFT JOIN
Returns r
ON c.CustID = r.CustID
WHERE c.CustID NOT IN (Select p.CustID FROM Product p Where p.Item = 'Breadbox');
```
Conditions on the first table remain in the `WHERE` clause.
As a note: A table called `Product` that contains a `CustId` seems awkward. The table behaves more likes its name should `CustomerProducts`.
|
Filter on second left join - SQL
|
[
"",
"mysql",
"sql",
""
] |
I have a database with songs. Every song have an unique id. How I can generate a random unique value for every id in database?
Example:
```
id | song name
1 | song1
2 | song2
3 | song3
```
After shuffle
```
id | song name
45 | song1
96 | song2
10 | song3
```
Any idea?
|
Use a combination of `FLOOR` and `RAND()` to get what you want.
If there are 1000 songs in your db.
```
SELECT FLOOR(RAND() * (1000 - 1 + 1)) + 1 AS ID, "song name"
FROM yourtable
```
If there are 7000 songs in your db.
```
SELECT FLOOR(RAND() * (7000 - 1 + 1)) + 1 AS ID, "song name"
FROM yourtable
```
Update..
```
UPDATE yourtable
SET id = FLOOR(RAND() * (1000 - 1 + 1)) + 1;
```
|
Does the ID have to be an *integer*? If not you could think about using *GUIDS* . If this is a possibility for you, then you get further information for migrating your table here:
[Generate GUID in MySQL for existing Data?](https://stackoverflow.com/questions/6280789/generate-guid-in-mysql-for-existing-data)
|
Shuffle database id
|
[
"",
"mysql",
"sql",
"random",
""
] |
I'm trying to parse a logging table in PostgreSQL **9.5**. Let's imagine I'm logging SMS sent from all the phones belonging to my company. For each record I have a timestamp and the phone ID.
I want to display how many SMS are sent by week but only for the phones that send SMS each week of the year.
My table is as following:
```
ββββββββββββββ¦βββββββββββ
β event_date β phone_id β
β βββββββββββββ¬βββββββββββ£
β 2016-01-05 β 1 β
β 2016-01-06 β 2 β
β 2016-01-13 β 1 β
β 2016-01-14 β 1 β
β 2016-01-14 β 3 β
β 2016-01-20 β 1 β
β 2016-01-21 β 1 β
β 2016-01-22 β 2 β
ββββββββββββββ©βββββββββββ
```
And I would like the following display
```
ββββββββββββββββ¦βββββββββββ¦βββββββββββββββ
β week_of_year β phone_id β count_events β
β βββββββββββββββ¬βββββββββββ¬βββββββββββββββ£
β 2016-01-04 β 1 β 1 β
β 2016-01-11 β 1 β 2 β
β 2016-01-18 β 1 β 2 β
ββββββββββββββββ©βββββββββββ©βββββββββββββββ
```
Only phone\_id 1 is displayed because this is the only ID with events in each week of the year.
Right now, I can query to group by week\_of\_year and phone\_IDs. I have the following result:
```
ββββββββββββββββ¦βββββββββββ¦βββββββββββββββ
β week_of_year β phone_id β count_events β
β βββββββββββββββ¬βββββββββββ¬βββββββββββββββ£
β 2016-01-04 β 1 β 1 β
β 2016-01-04 β 2 β 1 β
β 2016-01-11 β 1 β 2 β
β 2016-01-11 β 3 β 1 β
β 2016-01-18 β 1 β 2 β
β 2016-01-18 β 2 β 1 β
ββββββββββββββββ©βββββββββββ©βββββββββββββββ
```
How can I filter in order to only keep phone\_ids occurring for each week of year? I tried various subqueries but I must acknowledge I'm stuck. :-)
About the definition of `week_of_year`: as I want to consolidate data per week, I'm using in my select: `date_trunc('week', event_date)::date as interval`. And then I group by `interval` to have the number of SMS per `phone_id` per week.
About the date range, I just want this starting in 2016, I'm using a where condition in my query to ignore everything before: `WHERE event_date > '2016-01-01'`
I saw the request to create a SQL Fiddle but I have issues to do so, will do it if I'm not lucky enough to have a good hint to solve this.
Created a [quick **SQL Fiddle**](http://sqlfiddle.com/#!15/6021d/3/0), hope it would be useful.
|
Your concept of year seems very fuzzy. Let me instead assume that you mean for a period of time over the range of your data.
```
with w as (
select date_trunc('week', event_date) as wk, phone_id, count(*) as cnt
from messages
group by 1, 2
),
ww as (
select w.*,
min(wk) over () as min_wk,
max(wk) over () as max_wk,
count(*) over (partition by phone_id) as numweeks
from w
)
select ww.wk, ww.phone_id, ww.cnt
from ww
where (max_wk - min_wk) / 7 = cnt - 1;
```
The first CTE just aggregates the data by week and phone id. The second CTE calculates the first and last week in the data (these could be replaced with constants), as well as the number of weeks for a given phone.
Finally, the `where` clause makes sure that the number of weeks spans the period of time.
|
Below assumes that your table represents a full year. You didn't specify that.
To find all phones that send SMSs every week, you can do something like
```
select phone_id
from table
group by phone_id
having count(distinct extract(week from event_date)) >= 51
```
Note, I use 51, but the notion of a week in a year is a bit fuzzy, they
actually have 52 or 53 (partial) weeks. But 51 should be fine.
Anyway, And then you simply do
```
select phone_id, date_trunc('week', event_date), count(*)
from table
where phone_id in (.. query above ..)
group by 1, 2
```
Would be great if you provided sample data in [SQLFiddle](http://sqlfiddle.com/#!15/65f85/1)
|
How to display rows happening every week of a year?
|
[
"",
"sql",
"postgresql",
"timestamp",
"aggregate",
""
] |
* SQL Server 2008 R2
* A table with auto-increment key
* Many different threads have to batch insert rows in the table.
I would like to know how (if even it is possible) to insert the rows in a way that I'm absolutely sure that the keys of one thread's inserted rows will get sequential numbers.
For example if 2 threads are executing at the same time:
* Thread #1 insert 5 rows and get keys 1,2,3,4,5
* Thread #2 insert 5 rows and get keys 6,7,8,9,10
I must be sure not to get:
* Thread #1 rows get keys 1,3,4,8,9
* Thread #2 rows get keys 2,5,6,7,10
Obviously if each thread do some kind of loop and execute 5 times "INSERT INTO..." command it won't work because another thread can insert between.
But even if a thread use only one INSERT command to insert many rows, is it enough to guarantee the keys will be sequential ?
If yes, could you help me find where it is documented ? because I didn't.
If no, how is it possible to ensure that ?
---
EDIT Why I care of getting consecutive numbers:
It is mainly a performance concern, we actually have one integer not auto-increment column in a row, each thread lock the row, manually increment the column value by any number it needs, than release the row.
The problem with that is that only one thread can insert at a time, by testing we found that inserting rows in a new table auto-increment column, letting sql server manage identity assignment, was much faster.
Locking the entire table is not an option because it will result in the same problem as locking a common row.
The reason why I want to ensure that inserted row ids are sequential for a single thread insert is to reduce the need for code refactoring which actually works by keeping only the first number and the count, so the code can deduce what are the other numbers.
It is not a business concern to have the numbers being consecutives, so if it is impossible to do we will just have to keep each row number in an array instead but there is more code to refactor this way so I'm trying to avoid it if possible.
Please keep in mind that i'm totally aware that the design may not be ideal, but i'm working with a legacy "big ball of mud" system that I can't redesign much.
|
I was curious enough to test. On my virtual machine with SQL Server 2014 Express the answer is:
---
Generated `IDENTITY` values are **not** guaranteed to be sequential when multiple threads insert values. Even if it is a single `INSERT` statement that inserts several rows at once. (Under default transaction isolation level)
---
You can test it on your SQL Server 2008, but even if you don't see the same behaviour, it wouldn't be wise to rely on it, because it definitely changed in 2014.
Here is the full script to reproduce the test.
**Table**
```
CREATE TABLE [dbo].[test](
[ID] [int] IDENTITY(1,1) NOT NULL,
[dt] [datetime2](7) NOT NULL,
[V] [int] NOT NULL,
CONSTRAINT [PK_test] PRIMARY KEY CLUSTERED
(
[ID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
```
**INSERT script**
```
WAITFOR TIME '22:23:24';
-- set the time to about a minute in the future
-- open two windows in SSMS and run this script (F5) in both of them
-- they will start running at the same time specified above in parallel.
-- insert 1M rows in chunks of 1000 rows
-- in the first SSMS window uncomment these lines:
--DECLARE @VarV int = 0;
--WHILE (@VarV < 1000)
-- in the second SSMS window uncomment these lines:
--DECLARE @VarV int = 10000;
--WHILE (@VarV < 11000)
BEGIN
WITH e1(n) AS
(
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
) -- 10
,e2(n) AS (SELECT 1 FROM e1 CROSS JOIN e1 AS b) -- 10*10
,e3(n) AS (SELECT 1 FROM e1 CROSS JOIN e2) -- 10*100
,CTE_rn
AS
(
SELECT ROW_NUMBER() OVER (ORDER BY n) AS rn
FROM e3
)
INSERT INTO [dbo].[test]
([dt]
,[V])
SELECT
SYSDATETIME() AS dt
,@VarV
FROM CTE_rn;
SET @VarV = @VarV + 1;
END;
```
**Verifying the results**
```
WITH
CTE
AS
(
SELECT
[V]
,MIN(ID) AS MinID
,MAX(ID) AS MaxID
,MAX(ID) - MIN(ID) + 1 AS DiffID
FROM [dbo].[test]
GROUP BY V
)
SELECT
DiffID
,COUNT(*) AS c
FROM CTE
GROUP BY DiffID
ORDER BY c DESC;
```
This query calculates the `MIN` and `MAX` `ID` for each `V` (each chunk of 1000 inserted rows). If all `IDENTITY` values were generated sequentially, the difference between `MAX` and `MIN` IDs would always be exactly 1000. As we can see in the results, this is not the case:
**Result**
```
+--------+------+
| DiffID | c |
+--------+------+
| 1000 | 1940 |
| 2000 | 6 |
| 3000 | 3 |
| 1759 | 2 |
| 1477 | 2 |
| 1522 | 1 |
| 1524 | 1 |
| 1529 | 1 |
| 1538 | 1 |
| 1546 | 1 |
| 1548 | 1 |
| 1584 | 1 |
| 1585 | 1 |
| 1589 | 1 |
| 1597 | 1 |
| 1606 | 1 |
| 1611 | 1 |
| 1612 | 1 |
| 1620 | 1 |
| 1630 | 1 |
| 1631 | 1 |
| 1635 | 1 |
| 1658 | 1 |
| 1663 | 1 |
| 1675 | 1 |
| 1731 | 1 |
| 1749 | 1 |
| 1009 | 1 |
| 1038 | 1 |
| 1049 | 1 |
| 1055 | 1 |
| 1086 | 1 |
| 1102 | 1 |
| 1144 | 1 |
| 1218 | 1 |
| 1225 | 1 |
| 1263 | 1 |
| 1325 | 1 |
| 1367 | 1 |
| 1372 | 1 |
| 1415 | 1 |
| 1451 | 1 |
| 1761 | 1 |
| 1793 | 1 |
| 1832 | 1 |
| 1904 | 1 |
| 1919 | 1 |
| 1924 | 1 |
| 1954 | 1 |
| 1973 | 1 |
| 1984 | 1 |
| 2381 | 1 |
+--------+------+
```
In most cases, indeed, `IDENTITY` values were assigned sequentially, but in 60 cases out of 2000, they were not.
---
How to deal with it?
I personally prefer to use [`sp_getapplock`](https://msdn.microsoft.com/en-us/library/ms189823.aspx), rather than locking the table or increasing transaction isolation level.
But, end result is the same - you have to make sure that `INSERT` statements are not running in parallel.
---
In SQL Server 2012+ it is worth testing the behaviour of the new `SEQUENCE` feature. Specifically, the [`sp_sequence_get_range`](https://msdn.microsoft.com/en-us/library/ff878352.aspx) stored procedure that generates a range of sequence values from a sequence object. Let's leave this exercise to the reader.
|
use transactions.
The transaction will lock the table until you will commit so no other transaction will start until the previous is ended, so the identity values are safe
|
Sequential numbers for many rows inserted with auto-increment key
|
[
"",
"sql",
"sql-server",
"sql-server-2008-r2",
"auto-increment",
""
] |
I'm working on project where I have to combine records from two different tables and display them on the screen based on the parameters. My first table contain time slot records. Here is example of my SLOT\_TABLE:
```
ID Event_ID Time_Slots
1 150 7:00 AM - 7:15 AM
2 150 7:15 AM - 7:30 AM
3 150 7:30 AM - 7:45 AM
4 150 7:45 AM - 8:00 AM
5 150 8:00 AM - 8:15 AM
6 150 8:15 AM - 8:30 AM
```
My second table contain records for each user. Here is example of my REGISTRATION\_TABLE:
```
ID Event_ID Supervisor_ID Slot_ID User_ID Staff_ID
61 150 200 6 15 133
78 150 200 6 162 79
```
I have a problem to display all my time slots but with the records from the second table just for specific User. Here is example how I would like my records to be displayed:
```
ID Event_ID Time_Slots User_ID
1 150 7:00 AM - 7:15 AM
2 150 7:15 AM - 7:30 AM
3 150 7:30 AM - 7:45 AM
4 150 7:45 AM - 8:00 AM
5 150 8:00 AM - 8:15 AM
6 150 8:15 AM - 8:30 AM 162
```
As you can see I would like to display my time slots and display record just for my user with an id of 162, but not user of id 15 at the same time. I try to use this query to get that:
```
Select s.Time_Slots, r.User_ID
From SLOT_TABLE s
Left Outer Join REGISTRATION_TABLE r
On s.ID = r.SLOT_ID
Where s.EVENT_ID = '150'
```
But query above gave me this:
```
ID Event_ID Time_Slots User_ID
1 150 7:00 AM - 7:15 AM
2 150 7:15 AM - 7:30 AM
3 150 7:30 AM - 7:45 AM
4 150 7:45 AM - 8:00 AM
5 150 8:00 AM - 8:15 AM
6 150 8:00 AM - 8:15 AM 162
6 150 8:00 AM - 8:15 AM 15
```
So after that I tried to limit my query on User\_ID and I got this:
```
Select s.Time_Slots, r.User_ID
From SLOT_TABLE s
Left Outer Join REGISTRATION_TABLE r
On s.ID = r.SLOT_ID
Where s.EVENT_ID = '150'
And User_ID = '162'
ID Event_ID Time_Slots User_ID
6 150 8:00 AM - 8:15 AM 162
```
So my question is how I can get all time slots but at the same time to limit my query on User\_ID that I want? Is something like that even possible in sql? If anyone can help with this problem please let me know. Thanks in advance.
|
Try this. You need to edit line 8 with the user ID you are after and line 9 with the event ID you are after.
```
select a.id
,a.event_id
,case when b.user_id is not null then 'User_ID(' + b.user_id + ')' else a.time_slots end as time_slots
from slot_table a
left join registration_table b
on a.event_id = b.event_id
and b.slot_id = a.id
and b.user_id = '162' -- parameter 1
where a.event_id = '150' -- parameter 2
```
Edit: the above code will work for the original requirement, the below code for the new requirement:
```
select a.id
,a.event_id
,a.time_slots
,b.user_id
from slot_table a
left join registration_table b
on a.event_id = b.event_id
and b.slot_id = a.id
and b.user_id = '162' -- parameter 1
where a.event_id = '150' -- parameter 2
```
|
When you use WHERE you limit the final result, in your case you left outer join and then you select only the items with user. You need to use the ON clause of the LEFT JOIN in order to selectively join, while keeping the original records from the first table.
Maybe like this:
```
Select s.Time_Slots, r.User_ID
From SLOT_TABLE s
Left Outer Join REGISTRATION_TABLE r
On s.ID = r.SLOT_ID
-- here, in the ON clause, not in WHERE
And User_ID = '162'
--probably you also want to check the registration table EVENT_ID?
AND s.EVENT_ID=r.EVENT_ID
Where s.EVENT_ID = '150'
```
|
How to join two tables and get correct records?
|
[
"",
"sql",
"sql-server",
"join",
"left-join",
""
] |
I have a number of tables, around four, that I wish to join together. To make my code cleaner and readable (to me), I wish to join all at once and then filter at the end:
```
SELECT f1, f2, ..., fn
FROM t1 INNER JOIN t2 ON t1.field = t2.field
INNER JOIN t3 ON t2.field = t3.field
INNER JOIN t4 ON t3.field = t4.field
WHERE // filters here
```
But I suspect that placing each table in subqueries and filtering in each scope would make performance better.
```
SELECT f1, f2, ..., fn
FROM (SELECT t1_f1, t1_f2, ..., t1_fi FROM t1 WHERE // filter here) AS a
INNER JOIN
(SELECT t2_f1, t2_f2, ..., t2_fj FROM t2 WHERE // filter here) AS b
ON // and so on
```
Kindly advise which would lead to better performance and/or if my hunch is correct. I am willing to sacrifice performance to readability.
If indeed filtering in each subquery will be more efficient, does the architecture of database platform would make any difference or is this holds true for all RDBMS SQL flavors?
I'm using both SQL Server and Postgres.
|
The query optimizer will always attempt to take care of finding the most optimal plan from your SQL.
You should concentrate more on writing readable, maintainable code and then by analyzing the execution plan find the inefficient parts of your query (and more likely) the inefficient parts of your database and indexing design.
Moving your filtering around from the where clause to the join clause without any meaningful analysis is likely to be wasted effort.
|
Your first approach will always be better as the SQL engine will evaluate where conditions first and then perform joins. So while evaluating where clause, it will filter records if conditions are available.
```
SELECT f1, f2, ..., fn
FROM t1 INNER JOIN t2 ON t1.field = t2.field
INNER JOIN t3 ON t2.field = t3.field
INNER JOIN t4 ON t3.field = t4.field
WHERE // filters here
```
|
Joining multiple tables: where to filter efficiently
|
[
"",
"sql",
"sql-server",
"postgresql",
""
] |
I am having a table consists of to datetime columns "StartTime" and "CompleteTime". Initially completeTime column will be NULL untill the process is completed. And now my requirement is to display hours and minutes as shown Below
**Output:**
Ex: 2:01 Hr
(This Means "2" represents hours and "01" represents minutes)
**I Tried as below:**
```
Declare @StartDate dateTime = '2016-03-31 04:59:11.253'
Declare @EndDate dateTime = GETUTCDATE()
SELECT REPLACE(CONVERT(VARCHAR,CAST(DATEDIFF(second, @StartDate,
ISNULL(GETUTCDATE(),@EndDate)) / 36000.0 AS DECIMAL(9,2))) + ' hr','.',':')
```
**Output:**
```
0:05 hr
```
**Required Output:**
```
0:32 hr
```
**Note:** Before giving a negative mark, please check my query once. I have already referred to some links related to this, but they did not work.
|
Try this
```
DECLARE @STARTDATE DATETIME = '2016-03-31 04:59:11.253'
DECLARE @ENDDATE DATETIME = GETUTCDATE()
SELECT CONVERT(VARCHAR(10),DATEDIFF(MINUTE, @STARTDATE, @ENDDATE)/60)+':'+CONVERT(VARCHAR(10),DATEDIFF(MINUTE, @STARTDATE, @ENDDATE)%60)+' hr' AS DIFF
```
Result:
```
Diff
0:52 hr
```
Diff more than 24 hour also will handle this
```
72:56 hr
```
|
try this (MS SQL query) -
```
Declare @StartDate dateTime = '2016-03-31 04:59:11.253'
Declare @EndDate dateTime = GETUTCDATE()
SELECT CONVERT(varchar(5),
DATEADD(minute, DATEDIFF(minute, @StartDate, @EndDate), 0), 114) + ' hr'
```
Result - 00:47 hr
|
How to display Hours and Minutes between two dates
|
[
"",
"sql",
"sql-server",
"sql-server-2008",
"sql-server-2012",
""
] |
I have a MySQL table with a column that contains both null and non-null data.
While running the query below, I can visibly see that the BLOCKER column has null values.
```
mysql> select count(1), BLOCKER from mysql.PRSSTATE group by BLOCKER;
+----------+----------------+
| count(1) | BLOCKER |
+----------+----------------+
| 193403 | |
| 350 | Beta |
| 24 | Build |
```
If i issue query as shown below i am getting count(1) as zero.
```
mysql> select count(1) from mysql.PRSSTATE where BLOCKER is NULL;
+----------+
| count(1) |
+----------+
| 0 |
+----------+
1 row in set (0.13 sec)
```
My suspicion is that it might contain special characters, as I have migrated the data from some other system into this table. I am wondering how to resolve this; these rows should be returned by the "is null" statement.
|
`BLOCKER` may be has zero length:
```
select count(1) from mysql.PRSSTATE where (BLOCKER is NULL or BLOCKER = "");
```
|
The problem here is that you incorrectly assume that BLOCKER is NULL. In fact you are storing empty strings ("") and not a NULL value. You should modify your query to match both NULL and "" values:
```
select count(1) from mysql.PRSSTATE where BLOCKER IS NULL OR BLOCKER = "";
```
Alternatively modify your script (or whatever you've used to create those records) to insert NULL value when you have no data for the BLOCKER column or just don't pass anything and make sure that your BLOCKER column's definition is set to DEFAULT NULL.
If BLOCKER was NULL you would get the following output from your first query:
```
+----------+----------------+
| count(1) | BLOCKER |
+----------+----------------+
| 193403 | NULL |
| 350 | Beta |
| 24 | Build |
+----------+----------------+
```
|
MySQL column have null value but "is null" is not working
|
[
"",
"mysql",
"sql",
""
] |
I have a query that is supposed to return a list of customers with the most popular product type for each customer. I have a query that sums up each product purchased in all given product types and lists them in descending order per customer:
```
SELECT c.customer_name as cname, ptr.product_type as pop_gen, sum(od.quantity) as li
FROM product_type_ref as ptr
INNER JOIN product as p
on p.product_type_ref_id = ptr.product_type_ref_id
INNER JOIN order_detail as od
on od.product_id = p.product_id
INNER JOIN order as o
on o.order_id = od.order_id
INNER JOIN customer as c
on c.customer_id = o.customer_id
GROUP BY cname, pop_gen
ORDER BY cname, li DESC
```
which returns this data:
```
'andy','Drama',1000
'andy','Action',250
'andy','Comedy',100
'bebe','Drama',250
'bebe','Action',100
'bebe','Comedy',25
'buster','Action',825
'buster','Comedy',768
'buster','Drama',721
'buster','Romance',100
'ron','Romance',50
'ron','Comedy',10
```
how could i return this:
```
andy, Drama
bebe, Drama
buster, Action
ron, Romance
```
|
Classic `greatest-n-per-group`. One possible solution is to use `ROW_NUMBER()`:
```
WITH
CTE
AS
(
SELECT
c.customer_name as cname, ptr.product_type as pop_gen, sum(od.quantity) as li
,ROW_NUMBER() OVER(PARTITION BY c.customer_name ORDER BY sum(od.quantity) DESC) AS rn
FROM
product_type_ref as ptr
INNER JOIN product as p on p.product_type_ref_id = ptr.product_type_ref_id
INNER JOIN order_detail as od on od.product_id = p.product_id
INNER JOIN order as o on o.order_id = od.order_id
INNER JOIN customer as c on c.customer_id = o.customer_id
GROUP BY
cname, pop_gen
)
SELECT
cname, pop_gen, li
FROM CTE
WHERE rn = 1
ORDER BY cname;
```
|
In Postgres, you can just use `distinct on`:
```
SELECT DISTINCT ON (c.customer_name) c.customer_name as cname,
ptr.product_type as pop_gen, sum(od.quantity) as li
FROM product_type_ref as ptr
INNER JOIN product as p
on p.product_type_ref_id = ptr.product_type_ref_id
INNER JOIN order_detail as od
on od.product_id = p.product_id
INNER JOIN order as o
on o.order_id = od.order_id
INNER JOIN customer as c
on c.customer_id = o.customer_id
GROUP BY cname, pop_gen
ORDER BY cname, li DESC;
```
|
Postgresql returning the most popular genre of product per customer
|
[
"",
"sql",
"postgresql",
""
] |
I have problems with the following SQL Query:
```
SELECT job
FROM (SELECT job, COUNT(*) AS cnt
FROM Employee
GROUP BY job)
WHERE cnt=1
```
As Result it should only shows all jobs where cnt (count of jobs) equals 1.
When I test the select query above on Fiddle, I get following error :
```
Incorrect syntax near the keyword 'WHERE'.
```
SQLFiddle: <http://sqlfiddle.com/#!6/d812a/7>
|
No need to increase complexity by using sub-query when it is not require
```
SELECT job, count(job)
FROM Employee
GROUP BY job
having count(job)=1;
```
|
You need to provide alias name to the nested query
```
SELECT A.job
FROM (SELECT job, COUNT(*) AS cnt
FROM Employee
GROUP BY job)A
WHERE A.cnt=1
```
|
Nested SQL Query
|
[
"",
"mysql",
"sql",
"nested",
""
] |
So, I don't really understand the purpose of using an *implicit join* in SQL. In my opinion, it makes a join more difficult to spot in the code, and I'm wondering this:
Is there a greater purpose for actually wanting to do this besides the simplicity of it?
|
Fundamentally there is no difference between the implicit join and the explicit `JOIN .. ON ..`. Execution plans are the same.
I prefer the explicit notation as it makes it easier to read and debug.
Moreover, in the explicit notation you define the *relationship* between the tables in the `ON` clause and the *search condition* in the `WHERE` clause.
|
[Explicit vs implicit SQL joins](https://stackoverflow.com/questions/44917/explicit-vs-implicit-sql-joins)
When you join several tables no matter how the join condition written, anyway optimizer will choose execution plan it consider the best. As for me:
1) Implicit join syntax is more concise.
2) It easier to generate it automatically, or produce using other SQL script.
So I use it sometimes.
|
What's the purpose of an IMPLICIT JOIN in SQL?
|
[
"",
"mysql",
"sql",
"join",
"implicit",
""
] |
I have a table with power outage information, which looks like this,
```
KEY OUTAGE TIME POWER LINE ID
1 1/1 2:30 pm 75
2 1/5 4:00 pm 247
3 1/5 6:00 pm 247
4 1/3 8:00 am 11
```
KEY is just the primary key of the table. Outage time tells us when the outage occurred, and power line ID is just the identification number of the line that the outage occurred on.
I have a second table with meter information, that looks like this,
```
event_ID event_timestamp event_info POWER LINE ID
44 1/5 1:45 pm power on 247
45 1/5 1:45 pm power on 247
46 1/5 3:45 pm fault detected 247
47 1/5 3:55 pm power off 247
48 1/5 3:58 pm power off 247
49 1/5 5:15 pm power on 247
50 1/5 5:45 pm power off 247
51 1/5 5:50 pm power off 247
52 1/5 5:55 pm power off 247
53 1/5 5:59 pm power off 247
```
The goal is the following: For each outage, select all the meter events that happened on that power line before the outage time, and after the most recent "power on" signal that occurred on that power line.
For example, for outage #2, we would look at all the meter events before 1/5 4:00 pm (outage time), but all the events that occurred after 1/5 1:45 pm, since this is the most recent "power on" signal that occurred before the outage. For outage #3, we would look at all the events that occurred before 1/5 6:00 pm (outage time), but after 1/5 5:15 pm, since this is the most recent "power on" signal that occurred before the outage time.
I have done this with a cursor that loops through the outage table and calls a stored procedure that selects the desired events from the events table, but these tables are very large and the cursor is taking too long. I would like to know of a set-based way to approach this problem. Thanks!
EDIT: Sorry, I completely forgot to post the sample output. Here it is.
```
KEY event_ID POWER LINE ID
2 46 247
2 47 247
2 48 247
3 50 247
3 51 247
3 52 247
3 53 247
```
EDIT (again): I'm looking for a solution to this in Oracle. I'm sorry for the edits, first time posting a question on here.
|
I'm solving the problem using SQL Server so `#` means temp table.
Assuming below tables and data
```
create table #outage ([key] int, outage_time datetime, power_line int)
insert into #outage values
(1, '2015/1/1 2:30 pm', 75),
(2, '2015/1/5 4:00 pm', 247),
(3, '2015/1/5 6:00 pm', 247),
(4, '2015/1/3 8:00 am', 11)
create table #even (event_ID int , event_time datetime,
event_info varchar(20), power_line int)
insert into #even values
(44, '2015/1/5 1:45 pm' ,'power on' ,247),
(45, '2015/1/5 1:45 pm' ,'power on' ,247),
(46, '2015/1/5 3:45 pm' ,'fault detected' ,247),
(47, '2015/1/5 3:55 pm' ,'power off' ,247),
(48, '2015/1/5 3:58 pm' ,'power off' ,247),
(49, '2015/1/5 5:15 pm' ,'power on' ,247),
(50, '2015/1/5 5:45 pm' ,'power off' ,247),
(51, '2015/1/5 5:50 pm' ,'power off' ,247),
(52, '2015/1/5 5:55 pm' ,'power off' ,247),
(53, '2015/1/5 5:59 pm' ,'power off' ,247)
```
This is the query:
```
select o.[key], e.event_ID, o.power_line
from #outage o
inner join #even e on e.power_line = o.power_line
and e.event_time < o.outage_time
and e.event_time > (select max(event_time) from #even
where power_line = o.power_line
and event_time < o.outage_time
and event_info = 'power on')
```
|
Try this:
```
SELECT power_outage.key,
meters.event_id,
power_outage.power_line_id
FROM power_outage
JOIN meter_info meters
ON power_outage.power_line_id = meters.power_line_id
AND meters.event_timestamp < power_outage.outage_time
WHERE meters.event_timestamp > (SELECT MAX(lpo.event_timestamp)
FROM meter_info lpo -- LastPowerOn
WHERE lpo.power_line_id = power_outage.power_line_id
AND lpo.event_info = 'power on'
AND lpo.event_timestamp < power_outage.outage_time);
```
The 'JOIN' gets all elements as far as they occurred before the outage time while the condition filters the elements corresponding to the most recent power on.
|
Finding a set based solution instead of looping through each row in SQL
|
[
"",
"sql",
"oracle",
""
] |
I have 2 relationships:
1. Agents(many)-to-Properties(many) relationship (with a pivot table).
2. Properties(one)-to-Images(many) relationship (no pivot table).
So an Agent can have 10 Properties, and each Property will have 10 Images; therefore, Agent has 100 Images. (I do not want to create a relationship between the Agents and Images).
**Is there a query that will allow me get all of the Agent's Images?**
Something similar to `$agent->properties()->images()->get()`
|
You can use hasManyThrough <https://laravel.com/docs/5.1/eloquent-relationships#has-many-through>
In your Agent model:
```
public function images()
{
return $this->hasManyThrough('App\Images', 'App\Properties');
}
```
Then you can use
```
$agent->images()->get();
```
|
So, how about two queries like:
```
<?php
$agent = new Agent();
$image = new Image();
$propertyIds = $agent->properties()->lists('id');
$images = $image->newQuery()->whereIn('property_id', $propertyIds)->get();
```
|
How to get items from many to many relationships' one to many relationship in Laravel?
|
[
"",
"sql",
"database",
"laravel",
"orm",
"relationship",
""
] |
I have a table storing product price information; the table looks similar to the following (no is the primary key):
```
no name price date
1 paper 1.99 3-23
2 paper 2.99 5-25
3 paper 1.99 5-29
4 orange 4.56 4-23
5 apple 3.43 3-11
```
Right now I want to select all the rows where the "name" field appears more than once in the table. Basically, I want my query to return the first three rows.
I tried:
```
SELECT * FROM product_price_info GROUP BY name HAVING COUNT(*) > 1
```
but i get an error saying:
> column "product\_price\_info.no" must appear in the GROUP BY clause or be used in an aggregate function
|
```
SELECT *
FROM product_price_info
WHERE name IN (SELECT name
FROM product_price_info
GROUP BY name HAVING COUNT(*) > 1)
```
|
Try this:
```
SELECT no, name, price, "date"
FROM (
SELECT no, name, price, "date",
COUNT(*) OVER (PARTITION BY name) AS cnt
FROM product_price_info ) AS t
WHERE t.cnt > 1
```
You can use the window version of `COUNT` to get the population of each `name` partition. Then, in an outer query, filter out `name` partitions having a population that is less than 2.
|
Postgres: select all row with count of a field greater than 1
|
[
"",
"sql",
"postgresql",
""
] |
I have a table like this.
```
|DATE |VOUCHER_NO|CURRENCY|AMOUNT|DESCRIPTION|JOURNAL_TYPE|COA_NO |
|03/30/2016|0000000001|USD |2000 |ABCD |CREDIT |150001 |
|03/30/2016|0000000001|USD |2000 |ABCD |DEBIT |150001 |
|03/30/2016|0000000002|USD |1500 |ABCD |CREDIT |150002 |
|03/30/2016|0000000002|USD |1000 |ABCD |DEBIT |150002 |
|03/30/2016|0000000002|USD |500 |ABCD |DEBIT |150002 |
|03/30/2016|0000000003|USD |4000 |ABCD |CREDIT |150003 |
|03/30/2016|0000000003|USD |2000 |ABCD |DEBIT |150003 |
|03/30/2016|0000000003|USD |1000 |ABCD |DEBIT |150003 |
|03/30/2016|0000000003|USD |500 |ABCD |DEBIT |150003 |
|03/30/2016|0000000003|USD |500 |ABCD |DEBIT |150003 |
```
I have to sort the table by the greatest Amount, with Journal Type Credit first. And it has to be followed by its Debit based on the Voucher\_No (without concerning the Debit's amount), then next to the second greatest amount, like this.
```
|DATE |VOUCHER_NO|CURRENCY|AMOUNT|DESCRIPTION|JOURNAL_TYPE|COA_NO |
|03/30/2016|0000000003|USD |4000 |ABCD |CREDIT |150003 |
|03/30/2016|0000000003|USD |2000 |ABCD |DEBIT |150003 |
|03/30/2016|0000000003|USD |1000 |ABCD |DEBIT |150003 |
|03/30/2016|0000000003|USD |500 |ABCD |DEBIT |150003 |
|03/30/2016|0000000003|USD |500 |ABCD |DEBIT |150003 |
|03/30/2016|0000000001|USD |2000 |ABCD |CREDIT |150001 |
|03/30/2016|0000000001|USD |2000 |ABCD |DEBIT |150001 |
|03/30/2016|0000000002|USD |1500 |ABCD |CREDIT |150002 |
|03/30/2016|0000000002|USD |1000 |ABCD |DEBIT |150002 |
|03/30/2016|0000000002|USD |500 |ABCD |DEBIT |150002 |
```
How to do that? If I ordered first by amount, the Debit ones will scatter to below, and if I ordered first by voucher\_no, it will not be ordered by amount.
|
You can `ORDER BY` a *group sum* first, like this
```
ORDER BY
MAX(AMOUNT) OVER (PARTITION BY VOUCHER_NO) DESC, -- voucher with highest amount first
VOUCHER_NO, -- all rows of that voucher
CASE WHEN JOURNAL_TYPE = 'CREDIT' THEN 0 ELSE 1 END, -- credit first
AMOUNT DESC
```
|
This should do the trick:
```
with your_table as (select to_date('30/03/2016', 'dd/mm/yyyy') dt, 1 voucher_no, 'USD' currency, 2000 amount, 'ABCD' description, 'Credit' journal_type, 150001 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 1 voucher_no, 'USD' currency, 2000 amount, 'ABCD' description, 'Debit' journal_type, 150001 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 2 voucher_no, 'USD' currency, 1500 amount, 'ABCD' description, 'Credit' journal_type, 150002 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 2 voucher_no, 'USD' currency, 1000 amount, 'ABCD' description, 'Debit' journal_type, 150002 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 2 voucher_no, 'USD' currency, 500 amount, 'ABCD' description, 'Debit' journal_type, 150002 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 3 voucher_no, 'USD' currency, 4000 amount, 'ABCD' description, 'Credit' journal_type, 150003 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 3 voucher_no, 'USD' currency, 2000 amount, 'ABCD' description, 'Debit' journal_type, 150003 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 3 voucher_no, 'USD' currency, 1000 amount, 'ABCD' description, 'Debit' journal_type, 150003 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 3 voucher_no, 'USD' currency, 500 amount, 'ABCD' description, 'Debit' journal_type, 150003 coa_no from dual union all
select to_date('30/03/2016', 'dd/mm/yyyy') dt, 3 voucher_no, 'USD' currency, 500 amount, 'ABCD' description, 'Debit' journal_type, 150003 coa_no from dual)
-- End of mimicking your table with data in. See SQL below:
select *
from your_table
order by max(amount) over (partition by voucher_no) desc,
journal_type,
amount desc;
DT VOUCHER_NO CURRENCY AMOUNT DESCRIPTION JOURNAL_TYPE COA_NO
---------- ---------- -------- ---------- ----------- ------------ ----------
30/03/2016 3 USD 4000 ABCD Credit 150003
30/03/2016 3 USD 2000 ABCD Debit 150003
30/03/2016 3 USD 1000 ABCD Debit 150003
30/03/2016 3 USD 500 ABCD Debit 150003
30/03/2016 3 USD 500 ABCD Debit 150003
30/03/2016 1 USD 2000 ABCD Credit 150001
30/03/2016 1 USD 2000 ABCD Debit 150001
30/03/2016 2 USD 1500 ABCD Credit 150002
30/03/2016 2 USD 1000 ABCD Debit 150002
30/03/2016 2 USD 500 ABCD Debit 150002
```
N.B., if you're going to be using this query in a subquery, obviously you'll have to add the `max(amount) over (partition by voucher_no)` into the select column list in order to expose that column to the outer query.
---
ETA. If you could have the case where the debit amounts were greater than the credit amounts, and you're after the rows for the voucher code with the highest credit amount to be displayed first, then change:
```
max(amount) over (partition by voucher_no)
```
to
```
max(case when journal_type = 'Credit' then amount end) over (partition by voucher_no)
```
|
ORACLE - Custom ORDER BY to order pairs of data rows
|
[
"",
"sql",
"oracle",
""
] |
I have the following query in which I'm trying to join two tables matching their IDs so I can get the duplicated values in "c.code". I've tried a lot of queries but nothing works. I have 500k rows in my database, and with this query I only get 5k back, which is not right. I'm positive it should be at least 200k. I also tried to use Excel, but it's too much data for it to handle.
Any ideas?
Thanks in advance, everyone.
```
SELECT c.code, c.name as SCT_Name, t.name as SYNONYM_Name, count(c.code)
FROM database.Terms as t
join database.dbo.Concepts as c on c.ConceptId = t.ConceptId
where t.TermTypeCode = 'SYNONYM' and t.ConceptTypeCode = 'NAME_Code' and c.retired = '0'
Group by c.code, c.name, t.name
HAVING COUNT(c.code) > = 1
Order by c.code
```
|
```
with data as (
select c.code, c.name as SCT_Name, t.name as SYNONYM_Name
from database.Terms as t inner join database.dbo.Concepts as c
on c.ConceptId = t.ConceptId
where
t.TermTypeCode = 'SYNONYM'
and t.ConceptTypeCode = 'NAME_Code'
and c.retired = '0'
)
select *
--, (select count(*) from data as d2 where d2.code = data.code) as code_count
--, count(*) over (partition by code) as code_count
from data
where code in (select code from data group by code having count(*) > 1)
order by code
```
|
If you want just duplicates of c.code, your Group By is wrong (and so is your Having clause). Try this:
```
SELECT c.code
FROM database.Terms as t
join database.dbo.Concepts as c on c.ConceptId = t.ConceptId
where t.TermTypeCode = 'SYNONYM' and t.ConceptTypeCode = 'NAME_Code' and c.retired = '0'
Group by c.code
HAVING COUNT(c.code) > 1
```
This will return all rows where you have more than one c.code value.
|
Selecting ONLY Duplicates from a joined tables query
|
[
"",
"sql",
"sql-server",
"group-by",
"duplicates",
"large-data",
""
] |
I need to get the count of Male and Female users that did not place any order for a product. The result should show all the products and the number of male or female users that did not place any order.
I want the query results to look like this:
```
productid | productName | No_MaleUsers | No_FemaleUsers|
------------------------------------------------------------
1 | Jeans | 4 | 4 |
------------------------ -----------------------------------
2 | Apple Watch | 4 | 4 |
-------------------------------------------------------------
3 |A pair of socks | 5 | 4 |
-------------------------------------------------------------
4 |A pair of sneakers| 6 | 4 |
```
---
```
5 | Fast red car | 6 | 4 |
```
---
```
6 | Macbook | 6 | 4 |
```
---
Here is a Sample fiddle to help: <http://sqlfiddle.com/#!9/3a0e30>
In the Fiddle: User 1 and user 2 are the only ones that placed orders for products uid\_1(1,2) and uid\_2(1,2,3) and they are males. The remaining users did not place any order.
Update: I've updated my query to show what the final results should look like showing the male and female users that did not make place any orders
|
This is a complicated query. One approach is to use correlated subqueries:
```
select p.*,
(select count(*)
from users u
where u.uid not in (select o.uid from orders o where o.productid = p.productid) and
u.gender = 'Male'
) as NumMales,
(select count(*)
from users u
where u.uid not in (select o.uid from orders o where o.productid = p.productid) and
u.gender = 'Female'
) as NumFemales
from products p ;
```
[Here](http://sqlfiddle.com/#!9/3a0e30/36) is a SQL Fiddle.
|
This should help you get started with
```
SELECT p.productid,
p.productname,
COALESCE(total_maleusers, 0) - COALESCE(no_maleusers, 0) AS No_MaleUsers,
COALESCE(total_femaleusers, 0) - COALESCE(no_femaleusers, 0) AS No_FemaleUsers
FROM (SELECT p.productid,
p.productname,
total_maleusers,
total_femaleusers
FROM (SELECT Count(CASE WHEN u.gender = 'Male' THEN 1 END) Total_MaleUsers,
Count(CASE WHEN u.gender = 'Female' THEN 1 END) Total_FemaleUsers
FROM users u) u
CROSS JOIN products p) p
LEFT OUTER JOIN(SELECT p.productid,
p.productname,
Count(CASE WHEN u.gender = 'Male' THEN 1 END) No_MaleUsers,
Count(CASE WHEN u.gender = 'Female' THEN 1 END) No_FemaleUsers
FROM products p
JOIN orders o
ON p.productid = o.productid
JOIN users u
ON u.uid = o.uid
GROUP BY p.productid,
p.productname) up
ON p.productid = up.productid
```
[**SQL FIDDLE DEMO**](http://sqlfiddle.com/#!9/3a0e30/29)
|
List all products and count the male users, female users that did not place an Order
|
[
"",
"mysql",
"sql",
""
] |
I have semester term values such as fall 2011, spring 2011, and so on.
I want the results to be ordered in a sequential manner, for example:
```
fall 2001
spring 2001
fall 2002
spring 2002
....
```
however I am getting
```
fall 2001
fall 2002
.....
spring 2001
spring 2002
.....
```
when doing `order by semester`
|
```
SELECT Semester
FROM Table
ORDER BY regexp_replace(Semester, '[^0-9]+', ''),
regexp_replace(Semester, '[^a-zA-Z]+', '')
```
This parses out the numbers from the text so you can first order by Year, then by the text.
Or you could just get the right 4 most characters and order by them first...
Assming the right 4 most characters will always be the year...
```
SELECT Semester
FROM Table
ORDER BY substr('spring 2001',-4), semester
```
|
Use a CASE expression in your ORDER BY:
```
SELECT SEMESTER, whatever
FROM SEMESTER_VALUES
ORDER BY CASE SEMESTER
WHEN 'spring 2001' THEN 1
WHEN 'fall 2001' THEN 2
WHEN 'spring 2002' THEN 3
WHEN 'fall 2002' THEN 4
WHEN 'spring 2003' THEN 5
WHEN 'fall 2003' THEN 6
WHEN 'spring 2004' THEN 7
WHEN 'fall 2004' THEN 8
WHEN 'spring 2005' THEN 9
WHEN 'fall 2005' THEN 10
END;
```
You can make the CASE expression as arbitrarily complex as you need.
Best of luck.
|
order by semester term in oracle
|
[
"",
"sql",
"oracle",
"sql-order-by",
""
] |
I have a complicated query that has to use a number called `SubUnitRate`. This variable comes from another table with special condition. in short we have:
```
DECLARE
SUBUNITRATE NUMBER;
BEGIN
SELECT NVL (NULLIF (CU.SUBUNITRATE, 0), 1)
INTO SUBUNITRATE
FROM CURRENCYS CU
JOIN ACCOUNTS ACC ON CU.ID = ACC.CURRENCY
WHERE ACC.ID = :ACCOUNTID;
END;
SELECT SUBUNITRATE * 100 FROM DUAL;
```
My goal is to acquire the result of(in simple case):
```
SELECT SUBUNITRATE * 100 FROM DUAL;
```
But how is that possible?
|
Assuming you want to use the value of SUBUNITRATE multiple times in the same query you could use the WITH clause:
```
with cte as (
select case
when CU.SUBUNITRATE = 0 then 1
else CU.SUBUNITRATE
end as SUBUNITRATE
FROM CURRENCYS CU
JOIN ACCOUNTS ACC ON CU.ID = ACC.CURRENCY
WHERE ACC.ID = :ACCOUNTID
)
select cte.SUBUNITRATE * 100
from cte;
```
|
A PL/SQL block cannot return the results of a query as a query. Instead, you can print the results out.
So, does this do what you want?
```
DECLARE
SUBUNITRATE NUMBER;
BEGIN
SELECT NVL(NULLIF(CU.SUBUNITRATE, 0), 1)
INTO SUBUNITRATE
FROM CURRENCYS CU JOIN
ACC
ON CU.ID = ACC.CURRENCY
WHERE ACC.ID = :ACCOUNTID;
DBMS_OUTPUT.PUT_LINE(SUBUNITRATE * 100)
END;
```
|
How to declare and assign value to a variable before query?
|
[
"",
"sql",
"oracle",
"plsql",
""
] |
I'm struggling with this! My data is like the table below except there would be more than one user. Note that it isn't just a start/end time, there are many dates in between.
```
+-------------------------+--------+---------------------------+
| Date | Name 2 | Access |
+-------------------------+--------+---------------------------+
| 2014-09-29 14:50:03.000 | User1 | Ground Floor Door 1 (In) |
+-------------------------+--------+---------------------------+
| 2014-09-30 08:42:33.000 | User1 | Ground Floor Door 2 (In) |
+-------------------------+--------+---------------------------+
| 2014-09-30 08:42:58.000 | User1 | 1st Floor Door 1 (In) |
+-------------------------+--------+---------------------------+
| 2014-09-30 12:31:54.000 | User1 | Ground Floor Door 1 (Out) |
+-------------------------+--------+---------------------------+
| 2014-09-30 13:05:43.000 | User1 | Ground Floor Door 1 (In) |
+-------------------------+--------+---------------------------+
| 2014-09-30 13:11:32.000 | User1 | Ground Floor Door 3 (Out) |
+-------------------------+--------+---------------------------+
| 2014-09-30 13:55:28.000 | User1 | Ground Floor Door 1 (In) |
+-------------------------+--------+---------------------------+
| 2014-09-30 13:55:36.000 | User1 | Ground Floor Door 2 (In) |
+-------------------------+--------+---------------------------+
| 2014-09-01 18:31:21.000 | User1 | Ground Floor Door 4 (Out) |
+-------------------------+--------+---------------------------+
| 2014-10-01 08:31:21.000 | User1 | Ground Floor Door 1 (In) |
+-------------------------+--------+---------------------------+
| 2014-10-01 08:31:41.000 | User1 | Ground Floor Door 2 (In) |
+-------------------------+--------+---------------------------+
| 2014-10-01 17:31:49.000 | User1 | Ground Floor Door 4 (Out) |
+-------------------------+--------+---------------------------+
| 2014-10-02 08:31:51.000 | User1 | Ground Floor Door 1 (In) |
+-------------------------+--------+---------------------------+
| 2014-10-02 18:41:49.000 | User1 | Ground Floor Door 3 (Out) |
+-------------------------+--------+---------------------------+
```
I need to obtain the minimum and maximum value for each day between a range of dates. If I could calculate how long the lunch period was (difference between the first and last swipes between 12pm-2pm) that would be great as well but not important until I can get the first part working.
I've tried various versions of the query below so far but had no joy. Can anyone help me please? I have no problem getting the min and max for a specific date, it's only when there is a range involved that it doesn't work.
```
select min(Date) as EntryTime, max(Date) as ExitTime
from table
where [Name 2] like '%User1%' and EventTime between '2014-09-30 12:00:00' and '2014-10-05 12:00:00'
group by cast(Date as datetime)
```
This doesn't give the desired result set. I want my query to return something like this for my table:
```
+-------------------------+-------------------------+----------+------------+
| EntryTime | ExitTime | Username | Date |
+-------------------------+-------------------------+----------+------------+
| 2014-09-30 08:42:33.000 | 2014-09-01 18:31:21.000 | User1 | 2014-09-01 |
+-------------------------+-------------------------+----------+------------+
| 2014-10-01 08:31:21.000 | 2014-10-01 17:31:49.000 | User1 | 2014-10-01 |
+-------------------------+-------------------------+----------+------------+
| 2014-10-02 08:31:51.000 | 2014-10-02 18:41:49.000 | User1 | 2014-10-02 |
+-------------------------+-------------------------+----------+------------+
```
|
I think the problem is with your cast, try `CAST AS DATE` :
```
select name_2,
min(`date`) as EntryTime,
max(`date`) as ExitTime,
cast(`date` As Date) as YourDate
from table
where UserName like '%User1%'
and EventTime between '2014-09-30 12:00:00' and '2014-10-05 12:00:00'
group by cast(`date` As Date), name_2
```
|
this will give you a list of users each with every day's min and max
```
select cast(Date as datetime) as Date,Name,min(Date) as EntryTime, max(Date) as ExitTime
from table
where Date between '2014-09-30 12:00:00' and '2014-10-05 12:00:00'
group by cast(Date as datetime),Name
```
you may need to cast Date field into Date type
|
Get minimum and maximum datetime for each row between a date range
|
[
"",
"sql",
"sql-server",
""
] |
I have been trying for days this SQL statement.
I have a DB made for sales and all I need to do is:
```
SELECT
SUM(orders.total) as total, orders.transaction_date as date,
orders.id as orderid, orders.employee_id as empl
from orders GROUP by orders.employee_id
```
This query is perfect; it gives me all I need. However, I need to add the quantity that has been sold so far, which corresponds to this query:
```
SELECT order_id, SUM(quantity)
FROM order_items
Group By order_id
```
Which is also fine. Both work perfectly, but I need them into one sql statement.
When I try
```
SELECT
SUM(order_items.quantity),
SUM(orders.total) as total,
orders.transaction_date as date,
orders.id as orderid,
orders.employee_id as empl
from
orders, order_items
where
order_items.order_id = orders.id
GROUP by
orders.employee_id
```
Everything seems correct, except for the total. I have no idea why this is happening.
This is the DB
orders:
```
id | employee_id | transaction_date | total
```
order\_items:
```
order_id | quantity
```
|
You need to aggregate the two tables separately *before* joining them together:
```
select sum(oi.quantity), sum(o.total) as total,
o.employee_id as empl
from orders o join
(select oi.order_id, sum(oi.quantity) as quantity
from order_items oi
group by oi.order_id
) oi
on oi.order_id = o.id
group by o.employee_id;
```
It doesn't make sense to include the transaction date and order id in the `select`. You are aggregating by employee id, so there are (likely to be) multiple values for these columns.
|
You have two different `GROUP BY` statements between the first two queries, but they aren't both reflected in the merged query.
```
SELECT
SUM(order_items.quantity) as quantity,
SUM(orders.total) as total,
orders.transaction_date as date,
orders.id as orderid,
orders.employee_id as empl
from
orders
LEFT JOIN
order_items
ON
orders.order_id = order_items.order_id
where
order_items.order_id = orders.id
GROUP by
orders.employee_id, orders.order_id
```
However, this query still doesn't make sense. Each row would be a summary of all the orders and order items under one employee\_id. So transaction\_date or order id make no sense to select, because that would only give you the first order in the aggregate.
|
SQL - Unable to correctly calculate a SUM
|
[
"",
"mysql",
"sql",
""
] |
How can I count the number of occurrences of a substring within a string in PostgreSQL?
---
Example:
I have a table
```
CREATE TABLE test."user"
(
uid integer NOT NULL,
name text,
result integer,
CONSTRAINT pkey PRIMARY KEY (uid)
)
```
I want to write a query so that the `result` contains column how many occurrences of the substring `o` the column `name` contains. For instance, if in one row, `name` is `hello world`, the column `result` should contain `2`, since there are two `o` in the string `hello world`.
In other words, I'm trying to write a query that would take as input:
[](https://i.stack.imgur.com/Hg3Ey.png)
and update the `result` column:
[](https://i.stack.imgur.com/XO9DO.png)
---
I am aware of the function [`regexp_matches`](http://www.postgresql.org/docs/current/static/functions-string.html) and its `g` option, which indicates that the full (`g` = global) string needs to be scanned for the presence of all occurrences of the substring).
Example:
```
SELECT * FROM regexp_matches('hello world', 'o', 'g');
```
returns
```
{o}
{o}
```
and
```
SELECT COUNT(*) FROM regexp_matches('hello world', 'o', 'g');
```
returns
```
2
```
But I don't see how to write an `UPDATE` query that would update the `result` column in such a way that it would contain how many occurrences of the substring o the column `name` contains.
|
A common solution is based on this logic: *replace the search string with an empty string and divide the difference between old and new length by the length of the search string*
```
(CHAR_LENGTH(name) - CHAR_LENGTH(REPLACE(name, 'substring', '')))
/ CHAR_LENGTH('substring')
```
Hence:
```
UPDATE test."user"
SET result =
(CHAR_LENGTH(name) - CHAR_LENGTH(REPLACE(name, 'o', '')))
/ CHAR_LENGTH('o');
```
|
A Postgres'y way of doing this converts the string to an array and counts the length of the array (and then subtracts 1):
```
select array_length(string_to_array(name, 'o'), 1) - 1
```
Note that this works with longer substrings as well.
Hence:
```
update test."user"
set result = array_length(string_to_array(name, 'o'), 1) - 1;
```
|
Counting the number of occurrences of a substring within a string in PostgreSQL
|
[
"",
"sql",
"string",
"postgresql",
""
] |
Input Rows
```
userid | no | version_no
--------------|----------|--------------
abc | 100 | 1
abc | 2 | 1
abc | 101 | 2
abc | 3 | 2
def | 9 | 1
def | 1 | 2
def | 6 | 3
def | 8 | 4
```
I'd expect the output of the query to be:
```
abc | 104 | 2
def | 8 | 4
```
Can I do this using any any method other than self-joins ? I am using sql server.
The output no for abc - 104 is the sum of 101 and 3 from the inputs. If I have multiple rows for the latest version, I only want to display the sum of no's.
Apologies for editing the post multiple times.
|
You need to apply a ranking function after aggregation:
```
SELECT *
FROM
( SELECT userid, SUM(no) AS no_sum, version_no,
ROW_NUMBER()
OVER (PARTITION BY userid
ORDER BY version_no DESC) AS rn
FROM table_name
GROUP BY userid, version_no
) AS dt
WHERE rn = 1
```
|
To get just the aggregated results for the highest version\_no without a self join, you can use `TOP` and `ORDER BY`:
```
SELECT TOP 1
userid,
sum(no),
version_no
FROM your_table
GROUP BY userid, version_no
ORDER BY version_no DESC
```
`TOP 1` will return only the first record in the result set ordered by the `ORDER BY` clause of `version_no` in descending order.
|
Latest instance of sql row without self join
|
[
"",
"sql",
"sql-server",
""
] |
I am trying to get a single result row per date in SQL, using a single table in a postgres DB. I tried using Union, but I think this is not the right way. Can someone help me construct the right SQL.
Sample Data Columns for Table content: id,creationdate,contenttype
```
1 |2016-04-02|PAGE
2 |2016-04-02|ATTACHMENT
3 |2016-04-02|PAGE
4 |2016-04-03|ATTACHMENT
5 |2016-04-03|PAGE
6 |2016-04-03|ATTACHMENT
7 |2016-04-03|PAGE
8 |2016-04-04|ATTACHMENT
9 |2016-04-04|ATTACHMENT
10|2016-04-04|ATTACHMENT
```
If Use this SQL query:
```
SELECT
date(creationdate) AS create_date,
COUNT(*) AS PAGE,0 AS ATTACHMENT
FROM
content
WHERE
contenttype='PAGE' GROUP BY content.creationdate
UNION
SELECT
date(creationdate) AS create_date,
0 AS PAGE,COUNT(*) AS ATTACHMENT
FROM
content
WHERE
contenttype='ATTACHMENT' GROUP BY content.creationdate
ORDER BY create_date ASC;
```
I get the result
```
|create_date|PAGE|ATTACHMENT|
|2016-04-02|0|2|
|2016-04-02|1|0|
|2016-04-03|0|2|
|2016-04-03|1|0|
|2016-04-04|3|0|
```
What I want is:
```
|create_date|PAGE|ATTACHMENT|
|2016-04-02|1|2|
|2016-04-03|2|2|
|2016-04-04|3|0|
```
|
You can do it with conditional aggregation by selecting from the table only once:
```
SELECT date(creationdate) AS create_date,
count(CASE WHEN contenttype='PAGE' then 1 end) as PAGE,
count(CASE WHEN contenttype='ATTACHMENT' then 1 end) as ATTACHMENT
FROM
content
GROUP BY content.creationdate
ORDER BY create_date ASC;
```
|
You need a *conditional aggregate*:
```
SELECT
date(creationdate) AS create_date,
SUM(CASE WHEN contenttype='PAGE' THEN 1 ELSE 0 END) AS PAGE,
SUM(CASE WHEN contenttype='ATTACHMENT' THEN 1 ELSE 0 END) AS ATTACHMENT
FROM
content
GROUP BY content.creationdate
ORDER BY create_date ASC;
```
|
Aggregating postgres union results
|
[
"",
"sql",
"postgresql",
""
] |
Generate the following two result sets:
**1).** Query an alphabetically ordered list of all names in OCCUPATIONS, immediately followed by the first letter of each profession as a parenthetical (i.e.: enclosed in parentheses). For example: AnActorName(A), ADoctorName(D), AProfessorName(P), and ASingerName(S).
**2).** Query the number of occurrences of each occupation in OCCUPATIONS. Sort the occurrences in ascending order, and output them in the following format:
`There are total [occupation_count] [occupation]s.`
Table Name: Occupations
Total Columns: Two = 'Name' and 'Occupation', demo table is shown below:

**Sample Output:**
```
Ashely(P)
Christeen(P)
Jane(A)
Jenny(D)
Julia(A)
Ketty(P)
Maria(A)
Meera(S)
Priya(S)
Samantha(D)
There are total 2 doctors.
There are total 2 singers.
There are total 3 actors.
There are total 3 professors.
```
**My Approach:**
```
(SELECT NAME, '(', SUBSTRING(OCCUPATION, 1, 1), ')'
FROM OCCUPATIONS ORDER BY NAME)
UNION ALL
(SELECT COUNT(*) FROM OCCUPATIONS GROUP BY OCCUPATION ORDER BY ASEC);
```
**Error:**
```
ERROR 1222 (21000) at line 1:
The used SELECT statements have a different number of columns
```
Thank You!
|
Sometimes on HackerRank the concat function will give an error. You can use || to concatenate in the same way.
So if the code below doesnt work for you:
```
(
SELECT CONCAT(NAME, '(', SUBSTRING(OCCUPATION, 1, 1), ')') as THETEXT, '1' as SELECTNUMBER
FROM OCCUPATIONS
)
UNION ALL
(
SELECT CONCAT('There are total ', COUNT(*),' ', OCCUPATION, (IF (COUNT(*) > 1, 's',''))) as THETEXT, '2' as SELECTNUMBER
FROM OCCUPATIONS GROUP BY OCCUPATION
)
ORDER BY SELECTNUMBER ASC, THETEXT ASC;
```
TRY THIS INSTEAD!
SELECT name || '(' || UPPER(SUBSTR(occupation, 1, 1)) || ')' FROM occupations ORDER BY name;
SELECT 'There are a total of' || ' ' || COUNT(occupation) || ' ' || LOWER(occupation) || 's' || '.' FROM occupations GROUP BY occupation ORDER BY COUNT(occupation) ASC;
|
I just tried on hackerrank and it works, You don't need to use Union.
```
select concat(name,'(',upper(substring(occupation,1,1)),')') from occupations
order by name;
select concat("There are a total of",' ',count(occupation),' ',lower(occupation),'s',".") from occupations
group by occupation
order by count(occupation) asc;
```
EDIT using UNION to remove the extra line :
```
SELECT CONCAT(NAME, '(' , LEFT(OCCUPATION, 1), ')')
FROM OCCUPATIONS
UNION
SELECT CONCAT('There are a total of ', ' ', COUNT(OCCUPATION), ' ', LOWER(OCCUPATION), 's.')
FROM OCCUPATIONS
GROUP BY OCCUPATION;
```
|
MySQL Query error using UNION/UNION ALL and Group By
|
[
"",
"mysql",
"sql",
"database",
""
] |
I'm running the below query to try and get all the categories in my forum, with their latest topics posted. There are some categories that have no topics posted yet and want to return those also.
```
SELECT cat_id,cat_name,cat_description, Null as topic_date,
Null as topic_subject
FROM categories
UNION ALL
SELECT cat_id, cat_name, cat_description, topic_date, topic_subject
FROM categories, topics t
WHERE t.topic_cat=cat_id AND topic_id IN
(SELECT topic_id FROM
(SELECT topic_id FROM
(SELECT topic_id, topic_subject, MAX(topic_date) AS 'recent'
FROM topics
GROUP BY topic_Cat
) a
) b
);
```
This query returns the following results:-
```
+--------+-----------+---------+------------+---------------+
| cat_id | cat_name | cat_des | topic_date | topic_subject |
+--------+-----------+---------+------------+---------------+
| 1 | T20 World| Posts1 | NULL | NULL |
| 2 | Test | delete1| NULL | NULL |
| 3 | Test 2 | txt | NULL | NULL |
| 1 | T20 World| Posts1 | 2016-04-01 01:54:01 | test |
| 2 | Test | delete1| 2016-04-01 03:05:58 | test |
+--------+---------------+------------------------+--------+
```
As you can see, cat\_id 3 has no posts yet and is returned as desired!, however cat\_id 1&2, is repeated.
I'm trying to remove the duplications and I'm not sure what I'm missing
|
What you need is a left outer join clause to join the results of topics to those of categories *only if they exist*. Somewhere along these lines:
```
SELECT c.cat_id, c.cat_name, c.cat_description,
max(t.topic_date), t.topic_subject
FROM categories c LEFT OUTER JOIN topics t ON t.topic_cat=c.cat_id
GROUP BY c.cat_id, c.cat_name, c.cat_description, t.topic_subject;
```
|
Replace `UNION ALL` with `UNION`. The latter removes duplicates.
However, by your code you're probably trying to do a `LEFT OUTER JOIN`, so you should check some tutorials on `OUTER JOIN`s
|
SQL query to combine two tables while also showing all records from one table
|
[
"",
"mysql",
"sql",
""
] |
I have a table with online sessions like this (empty rows are just for better visibility):
```
ip_address | start_time | stop_time
------------|------------------|------------------
10.10.10.10 | 2016-04-02 08:00 | 2016-04-02 08:12
10.10.10.10 | 2016-04-02 08:11 | 2016-04-02 08:20
10.10.10.10 | 2016-04-02 09:00 | 2016-04-02 09:10
10.10.10.10 | 2016-04-02 09:05 | 2016-04-02 09:08
10.10.10.10 | 2016-04-02 09:05 | 2016-04-02 09:11
10.10.10.10 | 2016-04-02 09:02 | 2016-04-02 09:15
10.10.10.10 | 2016-04-02 09:10 | 2016-04-02 09:12
10.66.44.22 | 2016-04-02 08:05 | 2016-04-02 08:07
10.66.44.22 | 2016-04-02 08:03 | 2016-04-02 08:11
```
And I need the "envelop" online time spans:
```
ip_address | full_start_time | full_stop_time
------------|------------------|------------------
10.10.10.10 | 2016-04-02 08:00 | 2016-04-02 08:20
10.10.10.10 | 2016-04-02 09:00 | 2016-04-02 09:15
10.66.44.22 | 2016-04-02 08:03 | 2016-04-02 08:11
```
I have this query which returns desired result:
```
WITH t AS
-- Determine full time-range of each IP
(SELECT ip_address, MIN(start_time) AS min_start_time, MAX(stop_time) AS max_stop_time FROM IP_SESSIONS GROUP BY ip_address),
t2 AS
-- compose ticks
(SELECT DISTINCT ip_address, min_start_time + (LEVEL-1) * INTERVAL '1' MINUTE AS ts
FROM t
CONNECT BY min_start_time + (LEVEL-1) * INTERVAL '1' MINUTE <= max_stop_time),
t3 AS
-- get all "online" ticks
(SELECT DISTINCT ip_address, ts
FROM t2
JOIN IP_SESSIONS USING (ip_address)
WHERE ts BETWEEN start_time AND stop_time),
t4 AS
(SELECT ip_address, ts,
LAG(ts) OVER (PARTITION BY ip_address ORDER BY ts) AS previous_ts
FROM t3),
t5 AS
(SELECT ip_address, ts,
SUM(DECODE(previous_ts,NULL,1,0 + (CASE WHEN previous_ts + INTERVAL '1' MINUTE <> ts THEN 1 ELSE 0 END)))
OVER (PARTITION BY ip_address ORDER BY ts ROWS UNBOUNDED PRECEDING) session_no
FROM t4)
SELECT ip_address, MIN(ts) AS full_start_time, MAX(ts) AS full_stop_time
FROM t5
GROUP BY ip_address, session_no
ORDER BY 1,2;
```
However, I am concerned about the performance. The table has hundreds of million rows and the time resolution is millisecond (not one Minute as given in example). Thus CTE `t3` is gonna be huge. Does anybody have a solution which avoids the Self-Join and "CONNECT BY"?
A single smart [Analytic Function](https://docs.oracle.com/cd/E11882_01/server.112/e41084/functions004.htm#SQLRF06174) would be great.
|
Try this one, too. I tested it the best I could, I believe it covers all the possibilities, including coalescing adjacent intervals (10:15 to 10:30 and 10:30 to 10:40 are combined into a single interval, 10:15 to 10:40). It should also be quite fast, it doesn't use much.
```
with m as
(
select ip_address, start_time,
max(stop_time) over (partition by ip_address order by start_time
rows between unbounded preceding and 1 preceding) as m_time
from ip_sessions
union all
select ip_address, NULL, max(stop_time) from ip_sessions group by ip_address
),
n as
(
select ip_address, start_time, m_time
from m
where start_time > m_time or start_time is null or m_time is null
),
f as
(
select ip_address, start_time,
lead(m_time) over (partition by ip_address order by start_time) as stop_time
from n
)
select * from f where start_time is not null
/
```
|
Please test this solution, it works for your examples, but there may be some cases I didn't notice. No connect-by, no self-join.
```
with io as (
select * from (
select ip_address, t1, io, sum(io) over (partition by ip_address order by t1) sio
from (
select ip_address, start_time t1, 1 io from ip_sessions
union all
select ip_address, stop_time, -1 io from ip_sessions ) )
where (io = 1 and sio = 1) or (io = -1 and sio = 0) )
select ip_address, t1, t2
from (
select io.*, lead(t1) over (partition by ip_address order by t1) as t2 from io)
where io = 1
```
Test data:
```
create table ip_sessions (ip_address varchar2(15), start_time date, stop_time date);
insert into ip_sessions values ('10.10.10.10', timestamp '2016-04-02 08:00:00', timestamp '2016-04-02 08:12:00');
insert into ip_sessions values ('10.10.10.10', timestamp '2016-04-02 08:11:00', timestamp '2016-04-02 08:20:00');
insert into ip_sessions values ('10.10.10.10', timestamp '2016-04-02 09:00:00', timestamp '2016-04-02 09:10:00');
insert into ip_sessions values ('10.10.10.10', timestamp '2016-04-02 09:05:00', timestamp '2016-04-02 09:08:00');
insert into ip_sessions values ('10.10.10.10', timestamp '2016-04-02 09:02:00', timestamp '2016-04-02 09:15:00');
insert into ip_sessions values ('10.10.10.10', timestamp '2016-04-02 09:10:00', timestamp '2016-04-02 09:12:00');
insert into ip_sessions values ('10.66.44.22', timestamp '2016-04-02 08:05:00', timestamp '2016-04-02 08:07:00');
insert into ip_sessions values ('10.66.44.22', timestamp '2016-04-02 08:03:00', timestamp '2016-04-02 08:11:00');
```
Output:
```
IP_ADDRESS T1 T2
----------- ------------------- -------------------
10.10.10.10 2016-04-02 08:00:00 2016-04-02 08:20:00
10.10.10.10 2016-04-02 09:00:00 2016-04-02 09:15:00
10.66.44.22 2016-04-02 08:03:00 2016-04-02 08:11:00
```
|
Get envelope, i.e. overlapping time spans
|
[
"",
"sql",
"oracle",
"analytics",
"timespan",
""
] |
I have a simple SQL statement that does not seem to work. I want to join the table match\_team (via its "match\_id" column) to the table match (via its "id" column).
Therefore I wrote the following INNER JOIN STATEMENT
```
SELECT * FROM match_team INNER JOIN match ON match_team.match_id = match.id
```
This throws an error however. Any thoughts on what might go wrong here?
|
The problem is that `match` is a reserved word in your RDBMS, you didn't specify your RDBMS, and it really depend on it, but try one of this:
```
SELECT * FROM match_team INNER JOIN `match` ON match_team.match_id = `match`.id
SELECT * FROM match_team INNER JOIN "match" ON match_team.match_id = "match".id
```
I don't know of any SQL language that uses another character for reserved words
|
You can use double quotes for avoiding keywords.
```
SELECT * FROM match_team INNER JOIN "match" ON match_team.match_id = "match".id
```
|
Inner join statement does not seem to work
|
[
"",
"sql",
""
] |
I have the following table in the database.
```
+----+---------+---------------------+-----------+
| id | user_id | timeid | course_id |
+----+---------+---------------------+-----------+
| 02 | 2 | 2016-01-27 19:24:17 | 14 |
| 03 | 2 | 2016-02-27 19:24:27 | 15 |
| 04 | 2 | 2016-03-27 19:24:37 | 16 |
| 05 | 3 | 2016-01-27 19:24:17 | 19 |
| 06 | 3 | 2016-02-27 19:24:27 | 18 |
| 07 | 3 | 2016-03-27 19:24:37 | 17 |
+----+---------+---------------------+-----------+
```
How can I extract only the latest rows sorted by `timeid` for each user\_id?
In other words:
```
| 04 | 2 | 2016-03-27 19:24:37 | 16 |
| 07 | 3 | 2016-03-27 19:24:37 | 17 |
```
|
A simple way uses `=` and a correlated subquery:
```
select t.*
from t
where t.timeid = (select max(t2.timeid) from t t2 where t2.user_id = t.user_id);
```
|
You can use the following query:
```
SELECT t1.id, t1.user_id, t1.timeid, t1.course_id
FROM mytable as t1
JOIN (
SELECT user_id, MAX(timeid) AS timeid
FROM mytable
GROUP BY user_id
) AS t2 ON t1.user_id = t2.user_id AND t1.timeid = t2.timeid
```
`t2` is a derived table containing the latest `timeid` value per `user_id`. If you join the original table to this you can get back all field of the record having the latest-per-group value.
|
How to extract latest rows from Database for specific column?
|
[
"",
"mysql",
"sql",
""
] |
I was wondering if its possible to add a condition like the code below in a select statement, and if it is, how should I do it ?
code that looks like these
> SELECT first\_name ,last\_name FROM persons\_table [condition: WHERE last\_name is on exclusiveList]
|
if your exclusiveList is on another table you can do:
```
SELECT first_name ,last_name FROM persons_table
WHERE last_name in (select lastName from exclusiveListTable)
```
or even nicer: use join as a filter:
```
select * from -- or select A.* from
(SELECT first_name ,last_name FROM persons_table) A
inner join
(select lastName from exclusiveListTable ) B
on A.last_name = B.lastName
```
|
It should be
```
SELECT first_name ,last_name FROM persons_table
WHERE last_name in ('name1','name2',,,,'nameN')
```
|
How to check a specific string in List on a SQL Select statement?
|
[
"",
"sql",
"t-sql",
"soql",
""
] |
I'm running MySql Server 5.7.11 and this sentence:
```
updated datetime NOT NULL DEFAULT '0000-00-00 00:00:00'
```
is **not** working. Giving the error:
```
ERROR 1067 (42000): Invalid default value for 'updated'
```
But the following:
```
updated datetime NOT NULL DEFAULT '1000-01-01 00:00:00'
```
**just works**.
The same case for DATE.
As a *sidenote*, it is mentioned in the [MySQL docs](http://dev.mysql.com/doc/refman/5.7/en/datetime.html):
> The DATE type is used for values with a date part but no time part. MySQL retrieves and displays DATE values in 'YYYY-MM-DD' format. The supported range is '1000-01-01' to '9999-12-31'.
even if they also say:
> Invalid DATE, DATETIME, or TIMESTAMP values are converted to the βzeroβ value of the appropriate type ('0000-00-00' or '0000-00-00 00:00:00').
Having also into account the second quote from MySQL documentation, could anyone let me know why it is giving that error?
|
The error is because of the sql mode which can be strict mode as per latest MYSQL 5.7 documentation.
[MySQL Documentation 5.7 says](http://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict):
> Strict mode affects whether the server permits '0000-00-00' as a valid date:
> If strict mode is not enabled, '0000-00-00' is permitted and inserts produce no warning.
> If strict mode is enabled, '0000-00-00' is not permitted and inserts produce an error, unless IGNORE is given as well. For INSERT IGNORE and UPDATE IGNORE, '0000-00-00' is permitted and inserts produce a warning.
**To Check MYSQL mode**
`SELECT @@GLOBAL.sql_mode global, @@SESSION.sql_mode session`
**Disabling STRICT\_TRANS\_TABLES mode**
However to allow the format `0000-00-00 00:00:00`you have to disable STRICT\_TRANS\_TABLES mode in mysql config file or by command
**By command**
`SET sql_mode = '';`
or
`SET GLOBAL sql_mode = '';`
Using the keyword `GLOBAL` requires super previliges and it affects the operations all clients connect from that time on
if above is not working than go to `/etc/mysql/my.cnf` (as per ubuntu) and comment out `STRICT_TRANS_TABLES`
Also, if you want to permanently set the sql mode at server startup then include `SET sql_mode=''` in `my.cnf` on Linux or MacOS. For windows this has to be done in `my.ini` file.
**Note**
However, strict mode is not enabled by default in MYSQL 5.6. Hence it does not produce the error as per [MYSQL 5.6 documentation](https://dev.mysql.com/doc/refman/5.6/en/date-and-time-types.html) which says
> MySQL permits you to store a βzeroβ value of '0000-00-00' as a βdummy date.β This is in some cases more convenient than using NULL values, and uses less data and index space. To disallow '0000-00-00', enable the NO\_ZERO\_DATE SQL mode.
**UPDATE**
Regarding the bug matter as said by @Dylan-Su:
I don't think this is a bug - it is the way MYSQL is evolved over the time due to which some things are changed based on further improvement of the product.
However I have another related bug report regarding the `NOW()` function:
[Datetime field does not accept default NOW()](https://bugs.mysql.com/bug.php?id=27645)
**Another Useful note** [see [Automatic Initialization and Updating for TIMESTAMP and DATETIME](https://dev.mysql.com/doc/refman/5.6/en/timestamp-initialization.html)]
> As of MySQL 5.6.5, TIMESTAMP and DATETIME columns can be automatically initializated and updated to the current date and time (that is, the current timestamp). Before 5.6.5, this is true only for TIMESTAMP, and for at most one TIMESTAMP column per table. The following notes first describe automatic initialization and updating for MySQL 5.6.5 and up, then the differences for versions preceding 5.6.5.
**Update Regarding NO\_ZERO\_DATE**
As of MySQL as of 5.7.4 this mode is deprecated. For previous version you must comment out the respective line in the config file. Refer [MySQL 5.7 documentation on NO\_ZERO\_DATE](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sqlmode_no_zero_date)
|
I got into a situation where the data was mixed between NULL and 0000-00-00 for a date field. But I did not know how to update the '0000-00-00' to NULL, because
```
update my_table set my_date_field=NULL where my_date_field='0000-00-00'
```
is not allowed any more.
My workaround was quite simple:
```
update my_table set my_date_field=NULL where my_date_field<'0000-01-01'
```
because all the incorrect `my_date_field` values (whether correct dates or not) were from before this date.
|
Error in MySQL when setting default value for DATE or DATETIME
|
[
"",
"mysql",
"sql",
"date",
"datetime",
"console",
""
] |
I have table called `emp` and I a trying to find the lowest paid Clerk. My code `select min(sal) as min from emp where job='CLERK';` works fine and i get this:
```
MIN
----------
800
```
but I also want to show the name of the clerk which is `Smith`. When I run this code `select ename, min(sal) as min from emp where job='CLERK' group by name;` it gives me all the Clerks in the table, which is not really want I want. Here is a snippet of my table:
```
CREATE TABLE EMP
(EMPNO NUMBER(4) NOT NULL,
ENAME VARCHAR2(10),
JOB VARCHAR2(9),
MGR NUMBER(4),
HIREDATE DATE,
SAL NUMBER(7, 2),
COMM NUMBER(7, 2),
DEPTNO NUMBER(2));
INSERT INTO EMP VALUES
(7369, 'SMITH', 'CLERK', 7902,
TO_DATE('17-DEC-1980', 'DD-MON-YYYY'), 800, NULL, 20);
INSERT INTO EMP VALUES
(7499, 'ALLEN', 'SALESMAN', 7698,
TO_DATE('20-FEB-1981', 'DD-MON-YYYY'), 1600, 300, 30);
```
|
try this
```
SELECT * FROM emp
WHERE SAL = (select MIN(SAL) sal from emp WHERE JOB ='CLERK')
and JOB ='CLERK';
```
|
You can use `row_number`:
```
select ename, sal as min
from (
select ename, sal,
row_number() over (order by sal) as rn
from emp
where job='CLERK' ) t
where t.rn = 1
```
|
Find min(sal) along with employee name SQL Oracle
|
[
"",
"sql",
"oracle",
"greatest-n-per-group",
""
] |
I need to do this but I don't know as. I have this table.
```
SSN | SALARY | MONTH YEAR
1234 1881,33 01 2008
8762 2578 01 2008
8726 2183,6475 01 2008
2321 1745,8525 01 2008
3123 1639,2 01 2008
1934 2572 01 2008
```
Is it possible to select from months of all years the person that has the bigger salary? In this case January 2008 was 8762.
|
```
SELECT *
FROM (
SELECT t.*,
ROW_NUMBER() OVER ( PARTITION BY year_month
ORDER BY money DESC ) AS rn
FROM (
SELECT A.ssn,
SUM(B.WAGE)- SUM(B.SALARY/(8*20)) AS money,
TRUNC( rep_date, 'MM' ) AS year_month
FROM REP_LINES A
INNER JOIN COSTS B
ON ( A.JOB=B.CAT_NUM
and B.YEAR = EXTRACT( YEAR FROM A.REP_DATE ) )
GROUP BY A.ssn,
TRUNC( rep_date, 'MM' )
) t
)
WHERE rn = 1;
```
|
You can use `keep`:
```
select year, month, max(salary) as salary,
max(ssn) over (dense_rank first order by salary desc) as max_ssn
from (select to_char(l.rep_date, 'YYYY') as year, to_char(l.rep_date, 'MM') as month,
l.SSN, (SUM(c.WAGE)- SUM(c.SALARY/(8*20))) as salary
from rep_lines l join
costs c
on l.job = c.cat_num and
to_char(c.year) = to_char(l.rep_date, 'YYYY')
)
group by year, month
order by year, month;
```
|
Retrieve rows with highest salary per month and year
|
[
"",
"sql",
"oracle",
"oracle11g",
"oracle-sqldeveloper",
""
] |
I am trying to get a date range using the following SQL condition in MySQL.
```
and a.timestamp >= '2016-03-29'
and a.timestamp <= '2016-03-30'
```
This returns 0 rows but when I try a longer range like
and a.timestamp >= '2016-03-20'
and a.timestamp <= '2016-03-31'
It returns the rows I want which is shown below in the timestamp column [](https://i.stack.imgur.com/Jmm8X.png)
What condition for 2016-03-29 and 2016-03-30 would return the row shown in the image? Thanks
|
Presumably, you have no values on 2016-03-29. So, you only want to consider the *date* portion. One method is to use `date()`:
```
date(a.timestamp) >= '2016-03-29' and date(a.timestamp) <= '2016-03-30'
```
However, that is a bad habit, because the use of the function precludes the use of an index (if available). Instead, you probably intend:
```
a.timestamp >= '2016-03-29' and a.timestamp < '2016-03-31'
```
Note the change of `<=` to `<` for the second condition. This gets everything before that date, but not at midnight when the day begins.
|
Any time after midnight 2016-03-30 is greater than 2016-03-30, so you need to check like this to get values like "2016-03-30 15:55"
```
and a.timestamp >= '2016-03-29'
and a.timestamp < '2016-03-31'
```
This will return all datetime values on March 29th and March 30th.
|
Sql condition to return specific date range
|
[
"",
"mysql",
"sql",
""
] |
I have a table like this:
```
X Y
======
20 20
20 20
20 21
23 22
22 23
21 20
```
I need to find those rowid's where `X=Y` but their rowid is not the same? Like 1st row's `X` and 2nd row's `Y` is the same but they are in different rows.
|
you can do it many ways, and since you brought the `rowid` up, this is one of them:
```
select * from yourtable tab1 join yourtable tab2 on tab1.x = tab2.y and tab1.rowid <> tab2.rowid
```
|
You want duplicate rows:
```
select *
from
(
select x, y, rowid, count(*) over (partition by x,y) as cnt
from tab
where x=y
) dt
where cnt > 1
```
|
How to Find similar data with different Rowid in oracle?
|
[
"",
"sql",
"database",
"oracle",
"rowid",
""
] |
I have two tables `Emp` and `Dept` and I am trying to display how many people work in each department along with their department name, but I can't get it to work. I have tried this `select count(ename) as count from emp group by deptno;` but the output I am getting is this :
```
COUNT
----------
6
5
3
```
But I also want to show the names for each of those departments. Here are my tables :
Emp:
```
CREATE TABLE EMP
(EMPNO NUMBER(4) NOT NULL,
ENAME VARCHAR2(10),
JOB VARCHAR2(9),
MGR NUMBER(4),
HIREDATE DATE,
SAL NUMBER(7, 2),
COMM NUMBER(7, 2),
DEPTNO NUMBER(2));
INSERT INTO EMP VALUES
(7369, 'SMITH', 'CLERK', 7902,
TO_DATE('17-DEC-1980', 'DD-MON-YYYY'), 800, NULL, 20);
INSERT INTO EMP VALUES
(7499, 'ALLEN', 'SALESMAN', 7698,
TO_DATE('20-FEB-1981', 'DD-MON-YYYY'), 1600, 300, 30);
INSERT INTO EMP VALUES
(7521, 'WARD', 'SALESMAN', 7698,
TO_DATE('22-FEB-1981', 'DD-MON-YYYY'), 1250, 500, 30);
INSERT INTO EMP VALUES
(7566, 'JONES', 'MANAGER', 7839,
TO_DATE('2-APR-1981', 'DD-MON-YYYY'), 2975, NULL, 20);
INSERT INTO EMP VALUES
(7654, 'MARTIN', 'SALESMAN', 7698,
TO_DATE('28-SEP-1981', 'DD-MON-YYYY'), 1250, 1400, 30);
INSERT INTO EMP VALUES
(7698, 'BLAKE', 'MANAGER', 7839,
TO_DATE('1-MAY-1981', 'DD-MON-YYYY'), 2850, NULL, 30);
INSERT INTO EMP VALUES
(7782, 'CLARK', 'MANAGER', 7839,
TO_DATE('9-JUN-1981', 'DD-MON-YYYY'), 2450, NULL, 10);
INSERT INTO EMP VALUES
(7788, 'SCOTT', 'ANALYST', 7566,
TO_DATE('09-DEC-1982', 'DD-MON-YYYY'), 3000, NULL, 20);
INSERT INTO EMP VALUES
(7839, 'KING', 'PRESIDENT', NULL,
TO_DATE('17-NOV-1981', 'DD-MON-YYYY'), 5000, NULL, 10);
INSERT INTO EMP VALUES
(7844, 'TURNER', 'SALESMAN', 7698,
TO_DATE('8-SEP-1981', 'DD-MON-YYYY'), 1500, 0, 30);
INSERT INTO EMP VALUES
(7876, 'ADAMS', 'CLERK', 7788,
TO_DATE('12-JAN-1983', 'DD-MON-YYYY'), 1100, NULL, 20);
INSERT INTO EMP VALUES
(7900, 'JAMES', 'CLERK', 7698,
TO_DATE('3-DEC-1981', 'DD-MON-YYYY'), 950, NULL, 30);
INSERT INTO EMP VALUES
(7902, 'FORD', 'ANALYST', 7566,
TO_DATE('3-DEC-1981', 'DD-MON-YYYY'), 3000, NULL, 20);
INSERT INTO EMP VALUES
(7934, 'MILLER', 'CLERK', 7782,
TO_DATE('23-JAN-1982', 'DD-MON-YYYY'), 1300, NULL, 10);
```
Dept:
```
CREATE TABLE DEPT
(DEPTNO NUMBER(2),
DNAME VARCHAR2(14),
LOC VARCHAR2(13) );
INSERT INTO DEPT VALUES (10, 'ACCOUNTING', 'NEW YORK');
INSERT INTO DEPT VALUES (20, 'RESEARCH', 'DALLAS');
INSERT INTO DEPT VALUES (30, 'SALES', 'CHICAGO');
INSERT INTO DEPT VALUES (40, 'OPERATIONS', 'BOSTON');
```
|
Please try:
```
select count(*) as count,dept.DNAME
from emp
inner join dept on emp.DEPTNO = dept.DEPTNO
group by dept.DNAME
```
|
A request to list "Number of employees in each department" or "Display how many people work in each department" is the same as "For each department, list the number of employees", this must include departments with no employees. In the sample database, Operations has 0 employees. So a LEFT OUTER JOIN should be used.
```
SELECT dept.dname, COUNT(emp.empno) AS count
FROM dept
LEFT OUTER JOIN emp ON emp.deptno = dept.deptno
GROUP BY dept.dname;
```
|
Find the number of employees in each department - SQL Oracle
|
[
"",
"sql",
"oracle",
""
] |
Check the below Script
```
SELECT getdate() CurrentDate,getdate()+getdate() NewDate
```
Result is :
```
CurrentDate NewDate
2016-04-04 13:57:51.713 2132-07-08 03:55:43.427
```
My question is , why year is 2132 and Month is 07 in New Date field.
|
`1900-01-01` is date 0
```
SELECT CONVERT(datetime, 0)
```
when you add 2 dates together, it is implicitly convert to integer, perform the addition and then convert back to datetime
```
SELECT CONVERT(INT, getdate()), -- no of days since 1900-01-01
CONVERT(INT, getdate()) + CONVERT(INT, getdate()),
CONVERT(DATETIME, CONVERT(INT, getdate()) + CONVERT(INT, getdate()))
```
It is not logical to add 2 dates. But rather you would add x days to a date using `dateadd( day , x, a_date )`
|
As your dates are going down to seconds you should - as one learns this in school :-) - go down to the smallest unit, do the maths there and then - if needed - go back to any format you want to use for display.
The following will calculate the difference between two DATETIME values.
If your elapsed times are really DATETIME, you must be aware that you get into trouble, when this exceeds 24 hours or when the sum of them does. You should rather change the column type to INT and store the count of elapsed seconds. This is much closer to the *real* meaning and much easier to handle.
But - to give you a hint - this is one approach to calculate with datetime values:
```
CREATE TABLE #test(Elapsed1 DATETIME, Elapsed2 DATETIME);
INSERT INTO #test VALUES
( {d'2000-01-01'},DATEADD(DAY,3,{d'2000-01-01'})) --3 full days
,(GETDATE(),DATEADD(SECOND,180,GETDATE())) --3 minutes
--this would be your table of datetime values
SELECT Elapsed1
,Elapsed2
,DATEDIFF(SECOND,{d'2000-01-01'},Elapsed1) AS Elapsed1Seconds
,DATEDIFF(SECOND,{d'2000-01-01'},Elapsed2) AS Elapsed2Seconds
,DATEDIFF(SECOND,{d'2000-01-01'},Elapsed2)
-DATEDIFF(SECOND,{d'2000-01-01'},Elapsed1) AS DifferenceSeconds
,DATEADD(SECOND,DATEDIFF(SECOND,{d'2000-01-01'},Elapsed2)
-DATEDIFF(SECOND,{d'2000-01-01'},Elapsed1),{d'2000-01-01'}) AS AddedTimespans
FROM #test;
DROP TABLE #test;
```
|
SQL add Two date variables (Date + Date)
|
[
"",
"sql",
"sql-server",
"t-sql",
""
] |
I want to populate a table with data from a staging table. The interesting column in the staging table has the datatype `text` but is otherwise filled with either values that are parsable as doubles or are the empty string (ie `"4.209"`, `"42"` or `""`). The according column in the destination table has the data type `double`.
The SQL Statement I am executing is
```
insert into dest (.., theColumn, ... ) select ...., theColumn, .. from src
```
When I execute the statement (using ADO) I receive a `Data type mismatch in criteria expression` error).
If I replace `theColumn` with `null`, it works without error. So, I figure I should somehow convert empty strings to `null`s. Is this possible?
|
Use an `IIf()` expression: if `theColumn` contains a string which represents a valid number, return that number; otherwise return Null.
```
SELECT IIf(IsNumeric(theColumn), Val(theColumn), Null) FROM src
```
My first impulse was to use `IsNumeric()`. However I realized this is a more direct translation of what you requested ...
```
SELECT IIf(theColumn='', Null, Val(theColumn)) FROM src
```
|
Convert empty strings to zero maybe also work.
```
insert into dest (.., theColumn, ... ) select ...., theColumn+0, .. from src
```
|
How do I convert an empty string into a numerical null in Access SQL?
|
[
"",
"sql",
"ms-access",
"ado",
""
] |
I want to create a view named saledetailfortax and it will consist 13 columns. They are saledetaildate, saledetailtime, shopid, productid, unitid, expdate, batchno, mrp, totalprice, qty, looseqty, priceperunit and taxid.
My query is:
```
CREATE OR REPLACE VIEW saledetailfortax2 AS
select sd.saledetaildate, sd.saledetailtime, sd.shopid, sd.productid,
sd.unitid, sd.expdate, sd.batchno, sd.mrp, sd.totalprice, sd.qty,
sd.looseqty, sd.unitprice as priceperunit, ord.taxid
from saledetail sd
left JOIN distinctPriceperunit ord
ON sd.productid = ord.productid
AND sd.expdate = ord.expdate
AND sd.batchno = ord.batchno
AND sd.mrp = ord.mrp
AND sd.unitprice = ord.priceperunit
where sd.saledetaildate >= '2016-04-01'
order by sd.saledetaildate , sd.saledetailtime
```
The problem is when there are two taxid with same productid , expdate , batchno ,
mrp and unitprice then there are two records comes with same thing.
Suppose in saledetail table a record consist same productid, expdate, batchno, mrp and unitprice, but the productid have two taxid in distinctPriceperunit table then when left join occure it comes out with two records. But there only one record show with either of two taxid.
So how to eliminate duplicate record .
View distinctpriceperunit (all are distinct values) :
SELECT DISTINCT od.productid, od.unitid, od.priceperunit, od.expdate, od.mrp, od.batchno, od.taxid
FROM orderreceivedetail od
ORDER BY od.productid, od.unitid, od.priceperunit, od.expdate, od.mrp, od.batchno, od.taxid;
TABLE saledetail
(
saledetailid character varying(20) NOT NULL,
saledetaildate date,
saledetailtime timestamp without time zone,
shopid integer,
productid integer,
qty integer,
unitid integer,
unitprice numeric,
discperc numeric,
discamt numeric,
expdate date,
mrp numeric,
mfdate date,
batchno character varying(50),
totalprice numeric,
isreturned boolean,
userid integer,
saleid character varying(20),
isloose boolean,
looseqty integer,
CONSTRAINT saledetail\_pkey PRIMARY KEY (saledetailid)
)
|
`GROUP BY` solution:
```
CREATE OR REPLACE VIEW saledetailfortax2 AS
select sd.saledetaildate, sd.saledetailtime, sd.shopid, sd.productid,
sd.unitid, sd.expdate, sd.batchno, sd.mrp, sd.totalprice, sd.qty,
sd.looseqty, sd.unitprice as priceperunit, MAX(ord.taxid)
from saledetail sd
left JOIN distinctPriceperunit ord
ON sd.productid = ord.productid
AND sd.expdate = ord.expdate
AND sd.batchno = ord.batchno
AND sd.mrp = ord.mrp
AND sd.unitprice = ord.priceperunit
where sd.saledetaildate >= '2016-04-01'
group by sd.saledetaildate, sd.saledetailtime, sd.shopid, sd.productid,
sd.unitid, sd.expdate, sd.batchno, sd.mrp, sd.totalprice, sd.qty,
sd.looseqty, sd.unitprice
order by sd.saledetaildate, sd.saledetailtime
```
Correlated sub-query solution:
```
CREATE OR REPLACE VIEW saledetailfortax2 AS
select sd.saledetaildate, sd.saledetailtime, sd.shopid, sd.productid,
sd.unitid, sd.expdate, sd.batchno, sd.mrp, sd.totalprice, sd.qty,
sd.looseqty, sd.unitprice as priceperunit,
(select max(taxid) from distinctPriceperunit ord
WHERE sd.productid = ord.productid
AND sd.expdate = ord.expdate
AND sd.batchno = ord.batchno
AND sd.mrp = ord.mrp
AND sd.unitprice = ord.priceperunit)
from saledetail sd
where sd.saledetaildate >= '2016-04-01'
order by sd.saledetaildate, sd.saledetailtime
```
|
You could use a GROUP BY these columns productid , expdate , batchno , mrp and unitprice.
|
How to eliminate duplicate record in left join?
|
[
"",
"sql",
"postgresql",
""
] |
Below is the query I'm working with - I need to get the MAX date for each of the PhaseEndDt. I've tried the
```
(SELECT Max(v) FROM (VALUES (aphase1.updated_ts), (aphase2.updated_ts), (aphase3.updated_ts)) AS VALUE(v)) AS [MaxDate]
```
but it isn't working :-( any help would be greatly appreciated!
```
SELECT
dc.case_id,
aphase1.identifier_value AS "phase1",
aphase1.updated_ts AS "phase1_enddt",
aphase2.identifier_value AS "phase2",
aphase2.updated_ts AS "phase2_enddt",
aphase3.identifier_value AS "phase3",
APHASE3.UPDATED_TS AS "Phase3_EndDt"
FROM cmreporting.d_solution ds
INNER JOIN cmreporting.d_case dc ON ds.solution_sqn = dc.solution_sqn
LEFT JOIN cmreporting.a_identifiers aphase1 ON aphase1.identifier_value = 'Phase 1' AND dc.case_ID = aphase1.group_ID
LEFT JOIN cmreporting.a_identifiers aphase2 ON aphase2.identifier_value = 'Phase 2' AND dc.case_ID = aphase2.group_ID
LEFT JOIN cmreporting.a_identifiers aphase3 ON aphase3.identifier_value = 'Phase 3' AND dc.case_ID = aphase3.group_ID
```
|
you can do it much easier, and probably faster:
```
WITH
parms as (select 'Phase 1' AS "Phase1", 'Phase 2' AS "Phase2", 'Phase 3' AS "Phase3", '{091225F8-4606-401C-872E-FC5ACDC1D8E2}' AS case_id from dual)
SELECT
dc.case_id,
parms."Phase1",
(SELECT Max(updated_ts) FROM a_identifiers WHERE identifier_value = parms."Phase1" AND group_ID = dc.case_ID) AS "Phase1Enddt",
parms."Phase2",
(SELECT Max(updated_ts) FROM a_identifiers WHERE identifier_value = parms."Phase2" AND group_ID = dc.case_ID) AS "Phase2Enddt",
parms."Phase3",
(SELECT Max(updated_ts) FROM a_identifiers WHERE identifier_value = parms."Phase3" AND group_ID = dc.case_ID) AS "Phase3Enddt"
FROM parms, cmreporting.d_solution ds
INNER JOIN cmreporting.d_case dc ON ds.solution_sqn = dc.solution_sqn
WHERE dc.case_id = parms.case_id
AND rownum = 1 --if ther is more then one row
```
EDIT:
the same result you can get by this query:
```
WITH
parms as (select 'Phase 1' AS "Phase1", 'Phase 2' AS "Phase2", 'Phase 3' AS "Phase3", '{091225F8-4606-401C-872E-FC5ACDC1D8E2}' AS case_id from dual)
SELECT
group_ID as case_id,
parms."Phase1",
Max(Case When identifier_value = parms."Phase1" Then updated_ts End) AS "Phase1Enddt",
parms."Phase2",
Max(Case When identifier_value = parms."Phase2" Then updated_ts End) AS "Phase2Enddt",
parms."Phase3",
Max(Case When identifier_value = parms."Phase3" Then updated_ts End) AS "Phase3Enddt"
FROM parms, a_identifiers
Where Exists (Select 1 From cmreporting.d_solution ds
INNER JOIN cmreporting.d_case dc ON ds.solution_sqn = dc.solution_sqn
WHERE dc.case_id = a_identifiers.group_ID)
AND group_ID = parms.case_ID
GROUP BY group_ID
```
|
if you only need max of updated\_ts you can use subquery or case:
```
SELECT
dc.case_id,
aphase1.identifier_value AS "phase1",
aphase1.updated_ts AS "phase1_enddt",
aphase2.identifier_value AS "phase2",
aphase2.updated_ts AS "phase2_enddt",
aphase3.identifier_value AS "phase3",
aphase3.updated_ts AS "phase3_enddt",
(SELECT MAX(updated_ts) FROM cmreporting.a_identifiers WHERE group_ID = dc.case_ID AND identifier_value IN('Phase 1', 'Phase 2', 'Phase 3')) AS max_updated_ts_subquery,
CASE WHEN aphase1.updated_ts > aphase2.updated_ts AND aphase1.updated_ts > aphase3.updated_ts THEN aphase1.updated_ts
WHEN aphase2.updated_ts > aphase3.updated_ts THEN aphase2.updated_ts
ELSE aphase3.updated_ts END AS max_updated_ts_case,
(SELECT MAX(col) FROM (VALUES(aphase1.updated_ts), (aphase2.updated_ts), (aphase3.updated_ts)) AS tab(col)) AS max_updated_ts_from_values
FROM cmreporting.d_solution ds
INNER JOIN cmreporting.d_case dc ON ds.solution_sqn = dc.solution_sqn
LEFT JOIN cmreporting.a_identifiers aphase1 ON aphase1.identifier_value = 'Phase 1' AND dc.case_ID = aphase1.group_ID
LEFT JOIN cmreporting.a_identifiers aphase2 ON aphase2.identifier_value = 'Phase 2' AND dc.case_ID = aphase2.group_ID
LEFT JOIN cmreporting.a_identifiers aphase3 ON aphase3.identifier_value = 'Phase 3' AND dc.case_ID = aphase3.group_ID
```
EDIT:
but for oracle use [GREATEST](https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions060.htm)
like this:
```
SELECT
dc.case_id,
aphase1.identifier_value AS "phase1",
aphase1.updated_ts AS "phase1_enddt",
aphase2.identifier_value AS "phase2",
aphase2.updated_ts AS "phase2_enddt",
aphase3.identifier_value AS "phase3",
aphase3.updated_ts AS "phase3_enddt",
GREATEST(aphase1.updated_ts, aphase2.updated_ts, aphase3.updated_ts) AS MaxDate
FROM cmreporting.d_solution ds
INNER JOIN cmreporting.d_case dc ON ds.solution_sqn = dc.solution_sqn
LEFT JOIN cmreporting.a_identifiers aphase1 ON aphase1.identifier_value = 'Phase 1' AND dc.case_ID = aphase1.group_ID
LEFT JOIN cmreporting.a_identifiers aphase2 ON aphase2.identifier_value = 'Phase 2' AND dc.case_ID = aphase2.group_ID
LEFT JOIN cmreporting.a_identifiers aphase3 ON aphase3.identifier_value = 'Phase 3' AND dc.case_ID = aphase3.group_ID
```
|
Need multiple maxdates-oracle sql developer 4.0.2
|
[
"",
"sql",
"oracle",
""
] |
i dont have much expiriance with SQL and i am trying to crack my head on this query.
i have 3 tables: Projects, Calculator and partialBilling
(note: the 'calculator' columns you see at the code ive added 'k','l','m' etc are real...i didnt gave them those names...).
the query is working fine but **part** of the values that i am expecting from the aggregate function ('sumofTotal' column) are returning as null values and and they should not be null.
I would be grateful if someone point out the mistake in the query.
```
SELECT Projects.SpCall,Projects.CustName,Projects.CustNumber
,Projects.ReceiveDate,Projects.StartDate,Projects.ProjectType
,Calculator.AN,Projects.Professional,Projects.PmUserName
,Projects.AcountManager,Projects.CrmCallNum,Projects.ProjectCategory
,Projects.CallNum,Projects.ContactName,Projects.ContactPhone
,Projects.ContactEmail,Projects.HiddenNote,Projects.RowColor
, Projects.HeaderCellText,
SUM(Calculator.K + Calculator.L + Calculator.M + Calculator.N + Calculator.AD + Calculator.AR) AS sumofTotal
,partialBilling.Ammount FROM Projects LEFT JOIN Calculator ON Projects.SpCall=Calculator.AQ
LEFT JOIN partialBilling ON Projects.SpCall = partialBilling.spCall
WHERE PmUserName= 'JOHN DOE'AND OpertionalStatus
<> 'Billed' AND OpertionalStatus<> 'Finished' AND
OpertionalStatus<> 'Passed To Billing' AND OpertionalStatus<> 'Scanning'
AND OpertionalStatus<> 'Ended'
AND OpertionalStatus<> 'Green Billing'
AND (GeneralStatus= 'Passed To Project Manager'
OR GeneralStatus= 'Astrategic Project')
GROUP BY Projects.SpCall,Projects.CustName,Projects.CustNumber
,Projects.ReceiveDate,Projects.StartDate,Projects.ProjectType
,Calculator.AN,Projects.Professional,Projects.PmUserName
,Projects.AcountManager,Projects.CrmCallNum,Projects.ProjectCategory
,Projects.CallNum,Projects.ContactName,Projects.ContactPhone
,Projects.ContactEmail,Projects.HiddenNote,Projects.RowColor
, Projects.HeaderCellText,partialBilling.Ammount;
```
|
Instead of proprietary `IFNULL` better use Standard SQL `COALESCE`:
```
SUM(COALESCE(Calculator.K,0) + COALESCE(Calculator.L,0) + ... + COALESCE(Calculator.AR,0)) AS sumofTotal
```
Or maybe a bit more efficient:
```
SUM(COALESCE(Calculator.K,0)) + SUM(COALESCE(Calculator.L,0)) + ... + SUM(COALESCE(Calculator.AR,0)) AS sumofTotal
```
|
Try to use IFNULL()
```
SUM(IFNULL(Calculator.K,0) + ... + IFNULL(Calculator.AR,0)) AS sumofTotal
```
|
SQL query: NULL values that should not be NULL when using aggregate function with left join
|
[
"",
"mysql",
"sql",
"left-join",
"aggregate-functions",
""
] |
I have two tables
Table 1:
```
process status
1 completed
2 completed
3 not completed
```
`table 2` ; is a history table that gets its data from table1
```
process status
1 completed
2 completed
3 not completed
```
Next time when the data gets pushed into history table from table 1 i want only that data to be pushed whose status is 'not completed'
Such that my history table looks like below
Table 2 (history table)
```
Process status
1 completed
2 completed
3 not completed
3 completed
```
Please help me out with this, I have tried subqueries but it isnt working out for me.
I used this query:
```
INSERT INTO table2
SELECT *
FROM table1
WHERE NOT (status IN (SELECT Status
FROM table2
WHERE status = 'completed'))
```
|
Try this.
```
INSERT INTO table2 (process,status)
select process,'completed' as status From table1 where process in
(select distinct process from table2 where status <> 'completed')
```
|
Use this if you want to **match some column** in **both** the **Tables**:
```
UPDATE _table_name_
SET col1 = _alias_table_.col1, ... /* all columns except merge keys and Identity column */
FROM (SELECT * FROM _tmp_table1_ UNION SELECT * FROM _tmp_table2_) _alias_table_
WHERE _table_name_._merge_key_ = _alias_table_._merge_key_ /* could be multiple merge keys */
```
**Example:**
```
update orderdetails
set productname = a.productname
from (select productid,productname from products) as a
join orderdetails o
on o.productid = a.productid
where a.productid = o.productid;
```
|
Insert into..select with condition
|
[
"",
"sql",
"sql-server",
"select",
"insert",
""
] |
There are two tables
```
DEPT (
DEPT_ID NUMBER(5) PRIMARY KEY,
DEPT_NAME VARCHAR2(10)
);
COURSE (
COURSE_ID NUMBER(5) PRIMARY KEY,
COURSE_NAME VARCHAR2(15),DEPT_ID NUMBER(5),
FOREIGN KEY(DEPT_ID) REFERENCES DEPT
)
```
I want to change the size equal to `5` of the column `DEPT_ID` which has a `FOREIGN KEY` constraint.
I tried changing but it gives error:
> ORA-02267: column type incompatible with referenced column type
Which is because it violates the foreign key constraint.
**I didn't supplied any name to the foreign key while creating tables**. So how can I do it without dropping any table.??
|
I think you need to do the following:
* drop the foreign key constraints to the tables (you can use `alter table drop constraint`).
* change the data types in all the tables (you can use `alter table modify column`)
* add the foreign key constraints back (`alter table add constraint`)
This doesn't require dropping any tables.
|
You have to drop the foreign key constraint first. Then execute the command you mentioned. Then add back the constraint.
|
How to change the size of a column with FOREIGN KEY constraint?
|
[
"",
"sql",
"oracle",
"oracle10g",
""
] |
I need to create a range number from 1 to n.
For example, the parameter is `@StartingValue`
```
@StartingValue int = 96
```
Then the result should be:
```
Number
-------------
96
95
94
93
92
ff.
1
```
Does anyone have an idea how to do this?
Thank you.
|
Use a Jeff Moden's [**Tally Table**](http://www.sqlservercentral.com/articles/T-SQL/62867/) to generate the numbers:
```
DECLARE @N INT = 96
;WITH E1(N) AS( -- 10 ^ 1 = 10 rows
SELECT 1 FROM(VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1))t(N)
),
E2(N) AS(SELECT 1 FROM E1 a CROSS JOIN E1 b), -- 10 ^ 2 = 100 rows
E4(N) AS(SELECT 1 FROM E2 a CROSS JOIN E2 b), -- 10 ^ 4 = 10,000 rows
E8(N) AS(SELECT 1 FROM E4 a CROSS JOIN E4 b), -- 10 ^ 8 = 100,000,000 rows
CteTally(N) AS(
SELECT TOP(@N) ROW_NUMBER() OVER(ORDER BY(SELECT NULL))
FROM E8
)
SELECT * FROM CteTally ORDER BY N DESC
```
---
Explanation taken from Jeff's article (linked above):
> The CTE called E1 (as in 10E1 for scientific notation) is nothing more
> than ten SELECT 1's returned as a single result set.
>
> E2 does a CROSS JOIN of E1 with itself. That returns a single result
> set of 10\*10 or up to 100 rows. I say "up to" because if the TOP
> function is 100 or less, the CTE's are "smart" enough to know that it
> doesn't actually need to go any further and E4 and E8 won't even come
> into play. If the TOP has a value of less than 100, not all 100 rows
> that E2 is capable of making will be made. It'll always make just
> enough according to the TOP function.
>
> You can follow from there. E4 is a CROSS JOIN of E2 and will make up
> to 100\*100 or 10,000 rows and E8 is a CROSS JOIN of E4 which will make
> more rows than most people will ever need. If you do need more, then
> just add an E16 as a CROSS JOIN of E8 and change the final FROM clause
> to FROM E16.
>
> What's really amazing about this bad-boy is that is **produces ZERO
> READS**. Absolutely none, nada, nil.
|
One simple method is a numbers table. For a reasonable number (up to the low thousands), you can use `spt_values`:
```
with numbers as (
select top 96 row_number() over (order by (select null)) as n
    from master..spt_values
)
. . .
```
Another method is a recursive CTE:
```
with numbers as (
select 96 as n
union all
select n - 1
from numbers
    where n > 1
)
```
For larger values, you'll need to use the `MAXRECURSION` option.
|
Create a Range From n to 1 in SQL
|
[
"",
"sql",
"sql-server",
"t-sql",
"sql-server-2008-r2",
""
] |
Have a table in Oracle db such like this:
```
Word Cnt
A 20
B 25
C 23
B 29
D 31
```
What I trying to do - is to add an additional column with id of a word. But it is not a primary key it wont be unique because of repeating words. So the outcome I'm looking for is:
```
Word Cnt ID
A 20 1
B 25 2
C 23 3
B 29 2
D 31 4
```
How can I perform that in Oracle SQL?
|
You can use a window function to calculate the ID:
```
select word,
cnt,
dense_rank() over (order by word) as id
from the_table;
```
You can update the table using the above, if you really need to persist that:
```
merge into the_table tg
using (
select rowid as rid,
dense_rank() over (order by word) as new_id
from the_table
) t on (t.rid = tg.rowid)
when matched then update
set id = t.new_id;
```
|
My solution requires a lot of nested subqueries, but it works...
```
alter table mytable add (id number(12));
update mytable
set id = (select n from
(select word, rownum n from
(select word from mytable group by word order by word)
) x where mytable.word = x.word
);
```
|
Add column with id in Oracle SQL
|
[
"",
"sql",
"oracle",
""
] |
I have the following table:
```
FileName | SubFileName | TotalPlayersCount |
-------------------------------------------
AAA | SF1 | 11 |
AAA | SF2 | 5 |
AAA | SF3 | 3 |
BBB | SF1 | 8 |
BBB | SF2 | 15 |
BBB | SF3 | 2 |
CCC | SF1 | 5 |
CCC | SF2 | 10 |
CCC | SF3 | 20 |
```
As you can see, each `FileName` has 3 different `SubfileName` ('SF1', 'SF2', 'SF3').
Each of these `SubfileName` have a different value for `TotalPlayersCount`.
I am trying to select the max value of the column `TotalPlayersCount` out of the three `SubfileName`, and this, *FOREACH* `FileName`.
The result should be:
```
FileName | SubFileName | TotalPlayersCount |
-------------------------------------------
AAA | SF1 | 11 |
BBB | SF2 | 15 |
CCC | SF3 | 20 |
```
I tried myself a couple of queries and this is the closest I've come to:
```
select distinct FileName, max(TotalPlayersCount) AS TotalPlayersCount
from dbo.MyTestTable
group by FileName
```
This is the result I get:
```
FileName | TotalPlayersCount |
------------------------------
AAA | 11 |
BBB | 15 |
CCC | 20 |
```
So now I'm missing the `SubfileName` in the result.
Could you help me finding what's missing?
Thanks in advance.
|
Easy way, use `NOT EXISTS` to return a row if there are no other row with same FileName that has a higher TotalPlayersCount:
```
select FileName, SubFileName, TotalPlayersCount
from dbo.MyTestTable t1
where not exists (select 1 from dbo.MyTestTable t2
where t2.FileName = t1.FileName
and t2.TotalPlayersCount > t1.TotalPlayersCount)
```
`JOIN` version, perhaps better performance:
```
select t1.FileName, t1.SubFileName, t1.TotalPlayersCount
from dbo.MyTestTable t1
JOIN (select FileName, max(TotalPlayersCount) as TotalPlayersCount
from dbo.MyTestTable
group by FileName) t2
ON t2.FileName = t1.FileName
and t2.TotalPlayersCount = t1.TotalPlayersCount
```
Both queries will return both rows in case of a tie!
|
```
select fileName,subFileName,max(TotalPlayersCount )
from table_name
group by filename
```
|
Compare rows and select max value
|
[
"",
"mysql",
"sql",
"foreach",
"max",
""
] |
How can I connect two grouped below query?
```
select [Fiscal Year],[Fiscal Quater],sum([colX])as X
from table1
group by [Fiscal Year],[Fiscal Quater];
select [Fiscal Year],[Fiscal Quater],sum([colY]) as Y
from table2
group by [Fiscal Year],[Fiscal Quater];
```
result should return column:
[Fiscal Year],[Fiscal Quater], X, Y
|
You probably want something like this:
```
select coalesce(t1.[Fiscal Year], t2.[Fiscal Year]) as [Fiscal Year],
coalesce(t1.[Fiscal Quater], t2.[Fiscal Quater]) as [Fiscal Quater],
coalesce(t1.X, 0) as X,
coalesce(t2.Y, 0) as Y
from (
select [Fiscal Year], [Fiscal Quater], sum([colX]) as X
from table1
group by [Fiscal Year], [Fiscal Quater]) as t1
full join (
select [Fiscal Year], [Fiscal Quater], sum([colY]) as Y
from table2
group by [Fiscal Year], [Fiscal Quater]
) as t2 on t1.[Fiscal Year] = t2.[Fiscal Year] and
t1.[Fiscal Quater] = t2.[Fiscal Quater]
```
|
`UNION ALL` the two tables together in a derived table, do `GROUP BY` at main level:
```
select [Fiscal Year], [Fiscal Quater], sum([colX])as X, sum([colY]) as Y
from
(
select [Fiscal Year], [Fiscal Quater], colX, NULL as colY from table1
UNION ALL
select [Fiscal Year], [Fiscal Quater], NULL as colX, colY from table2
) t
group by [Fiscal Year],[Fiscal Quater]
```
Alternatively, do double `GROUP BY`:
```
select [Fiscal Year], [Fiscal Quater], sum(X), sum(Y)
from
(
select [Fiscal Year], [Fiscal Quater], sum([colX]) as X, NULL as Y
from table1
  group by [Fiscal Year],[Fiscal Quater]
UNION ALL
select [Fiscal Year], [Fiscal Quater], NULL as X, sum([colY]) as Y
from table2
group by [Fiscal Year],[Fiscal Quater]
) t
group by [Fiscal Year],[Fiscal Quater]
```
Perhaps optimized differently?
|
Combine two grouped query into one select
|
[
"",
"sql",
"sql-server",
"sql-server-2008",
"group-by",
""
] |
If I have a user that can be associated with multiple keys would the proper table setup be:
1. One table with two columns such as:
```
UserName | Key
```
where there is no primary key and a user can have multiple rows, or:
2. Two tables with an matching identifier
```
Table 1 UserName | UserId
Table 2 Key | UserId
```
where `UserId` is the primary key of table1 and the foreign key of table 2.
Which way is more preferred if I wanted to find all the keys associated with a user?
|
If you wanted to find all the keys associated with a given user you might use the following `JOIN` query:
```
SELECT Key
FROM keys k INNER JOIN users u
ON k.UserId = u.UserId
WHERE u.UserName = 'username'
```
The place which would benefit most from an index in this case would be the `UserId` columns in the two tables. If this index existed, then, for a given user, looking up keys in the `Key` table would require roughly constant time.
Without any indices, then MySQL will have to do a full table scan for each user, as it tries to find keys corresponding to that user.
|
Without an understanding of the entities and attributes you are attempting to model, it's not really possible to give you an answer to the question you asked.
**Entity Relationship Modeling**
What *entities* does your data model represent? An entity is a person, place, thing, concept or event that can be uniquely identified, is important to the system, and we can store information about.
From the description given in the question, we are thinking that a "user" is an entity. And maybe "key" is also an entity. We can't really tell from the description whether that's an entity, or whether it's a repeating (multi-valued) attribute.
What uniquely identifies a "user"?
What attributes do we need/want to store about a "user"?
The second part is understanding the **relationships** between the entities.
To do that, we need to ask and get answers to some questions, such as:
How many "users" can be associated with a specific "key"?
Does a "key" have to be related to a user, or can a key be related to zero users?
Can a "key" be uniquely identified, apart from a user?
And so on.
Based on those answers, we can start to put together a model, and evaluate how well that model represents the problem, and how well that model is going to work for our expected use cases.
If both "user" and "key" are entities, and there is a many-to-many relationship between the entities, the model for that is going to look different than if "key" is not an entity, but just a multi-valued attribute.
If a key must "belong" to one and only one user, and a user can "hold" zero, one or more keys, likely it's a multivalued attribute. Then we need two tables. One "parent" table for the "user" entity, and another "child" table to store the repeating attribute.
We don't know (yet) what set of attributes uniquely identifies a user, so we'll represent that with a generic "userid" attribute of some unspecified datatype.
```
user
-----
userid datatype NOT NULL PRIMARY KEY
name varchar(30) NOT NULL
```
e.g.
```
userid name
------ ------
psimon paul simon
agarfu art garfunkel
```
To store a multi-valued attribute, we use the PRIMARY KEY of the entity table as a foreign key in our second "child" table.
```
user_key
--------
userid datatype NOT NULL FOREIGN KEY ref user.userid
key VARCHAR(30) NOT NULL
```
e.g.
```
user_key
userid key
------- -------
psimon G major
psimon A major
psimon A major
psimon B minor
agarfu A major
```
If we decide that "user" will have a different column as the primary key, then we'd use that same column as the foreign key in the child table.
In this example, we've allowed "duplicate" values for "key" for a given user. If we only want distinct values of "key" for a user, we'd need to add a UNIQUE constraint on the `(userid, key)` tuple.
---
Before we get too worried about performance, we need to concerned with getting some workable data models. From there, we can translate that into some implementations, and evaluate performance characteristics of each of those.
If the implementation has tables that don't have a suitable primary key, we can introduce another column to stand in as a "surrogate" primary key.
|
Performance of primary/foreign key versus single table with no primary key
|
[
"",
"sql",
"sql-server",
"database",
""
] |
Recently I've been developing a leave management system. In this application I need a report like in a month wise employee leave statement.
So here's my sample table:
```
Employee Id application Date Start Date End Date
20130002 14-Mar-2016 16-Mar-2016 17-Mar-2016
20130012 15-Mar-2016 29-Mar-2016 2-Apr-2016
20130003 14-Mar-2016 15-Mar-2016 16-Mar-2016
20130005 10-Mar-2016 24-Mar-2016 24-Mar-2016
20130002 10-Mar-2016 20-Mar-2016 25-Mar-2016
20130006 13-Mar-2016 8-Mar-2016 17-Mar-2016
20130001 14-Mar-2016 4-Apr-2016 24-Apr-2016
20130003 15-Mar-2016 16-May-2016 18-May-2016
20130011 10-Mar-2016 7-Jun-2016 7-Jun-2016
```
Now I need a report where I can get month wise this report. Suppose I need only March's data, like this:
```
<pre>
Employee Id application Date Start Date End Date
20130002 14-Mar-2016 16-Mar-2016 17-Mar-2016
20130012 15-Mar-2016 29-Mar-2016 31-Mar-2016
20130003 14-Mar-2016 15-Mar-2016 16-Mar-2016
20130005 10-Mar-2016 24-Mar-2016 24-Mar-2016
20130002 10-Mar-2016 20-Mar-2016 25-Mar-2016
20130006 13-Mar-2016 8-Mar-2016 17-Mar-2016
</pre>
```
How can I achieve this - by PL/SQL or any SQL language?
|
Assuming that the time component of the dates is set to `00:00:00` then:
```
SELECT EmployeeId,
application_date,
GREATEST( start_date, DATE '2016-03-01' ) AS start_date,
LEAST( end_date, DATE '2016-03-31' ) AS end_date
FROM table_name
WHERE Start_date <= DATE '2016-03-31'
AND end_date >= DATE '2016-03-01'
```
You can use a bind variable to replace the hard-coded dates like this:
```
SELECT EmployeeId,
application_date,
GREATEST( start_date, :month_start ) AS start_date,
LEAST( end_date, LAST_DAY( :month_start ) ) AS end_date
FROM table_name
WHERE Start_date <= LAST_DAY( :month_start )
AND end_date >= :month_start
```
If you have time components then:
```
SELECT EmployeeId,
application_date,
GREATEST( start_date, :month_start ) AS start_date,
LEAST( end_date, :month_start + INTERVAL '1' MONTH - INTERVAL '1' SECOND )
AS end_date
FROM table_name
WHERE Start_date < :month_start + INTERVAL '1' MONTH
AND end_date >= :month_start
```
|
This is for SQL Server
```
SELECT *
FROM Leaves
WHERE MONTH(StartDate) <= 4 and Month(EndDate) >= 4
```
For Oracle
```
SELECT *
FROM Leaves
WHERE EXTRACT(month FROM StartDate) <= 4 and EXTRACT(month FROM EndDate) >= 4
```
|
How to get all info about start date to end date within a given date range?
|
[
"",
"sql",
"oracle",
"plsql",
""
] |
I'm returning total sales for a period of time for each country. Sometimes a country will not appear in the results because they haven't had any orders during that time period. For these countries with no sales, I would like to include in the results the countries abbreviated name and sales total with a value of '0'. For example, NL and IS should also be included in the results with Sales\_Total both with a value of '0'. How would I include those dummies rows in the results when the country hasn't had any sales for the period?
```
**QUERY:**
SELECT
Country,
SUM(TOTAL) AS Sales_Total
FROM Orders
WHERE OrderDate BETWEEN '2014-01-01' AND '2014-12-31'
GROUP BY Country
**RESULTS**
Country Total_Sales
AU 7646
CA 13773
KR 13976
NZ 1831
US 69421
**Required Results:**
Country Total_Sales
AU 7646
CA 13773
KR 13976
NZ 1831
US 69421
NL 0
IS 0
```
|
This should do it:
```
SELECT Country
, Sales_Total=ISNULL(Sales_Total,0)
FROM
(SELECT o.Country
, SUM(TOTAL) AS Sales_Total
FROM Orders
WHERE OrderDate BETWEEN '2014-01-01' AND '2014-12-31'
GROUP BY Country) AS o
RIGHT OUTER JOIN
(SELECT DISTINCT
Country
FROM Orders) AS C ON o.Country = c.Country;
```
|
I would use (create if needed) a country table you could outer join from. Then you can write like so;
```
SELECT
c.CountryCode,
SUM(TOTAL) AS Sales_Total
FROM Country c
LEFT JOIN Orders o
on c.CountryCode = o.Country AND o.OrderDate BETWEEN '2014-01-01' AND '2014-12-31'
GROUP BY c.CountryCode
```
|
Microsoft SQL: include dummy row(s) in results when value doesn't exist
|
[
"",
"sql",
"sql-server",
""
] |
```
select mydate
from Tble_xxx
where CONVERT(varchar(20), mydate, 120)
BETWEEN CONVERT(varchar(20), (@startdate, 'yyyy-MM-dd HH:m:ss:mmm', 'en-US') , 120)
AND CONVERT(varchar(20), (@enddate, 'yyyy-MM-dd HH:m:ss:mmm', 'en-US'), 120)
```
When I try to get dates between two dates and change to the specific datetime format, an error occurs. Please help me...
```
select mydate
from Tble_xxx
where CONVERT(varchar(20), mydate, 120)
BETWEEN CONVERT(varchar(20), (N'08/01/2015 12:00:00', 'yyyy-MM-dd HH:m:ss:mmm', 'en-US') , 120)
AND CONVERT(varchar(20), (N'06/04/2015 12:00:00', 'yyyy-MM-dd HH:m:ss:mmm', 'en-US'), 120)
```
|
Try this
```
DECLARE @startdate DATETIME='08/01/2015 12:00:00'
DECLARE @enddate DATETIME='06/04/2015 12:00:00'
select mydate
from Tble_xxx
where CAST(mydate AS DATE) BETWEEN CAST(@startdate AS DATE) AND CAST(@enddate AS DATE)
```
|
SQL server supports implicit conversion between character types (char, nchar, varchar, nvarchar) and date/time types (datetime, date, time, etc), so you do not need to use explicit `Convert` statements.
See the Data Type Conversion chart here: <https://msdn.microsoft.com/en-gb/library/ms191530.aspx>
For character/datetime conversions, it also assumes the character data will be in 'yyyy-mm-dd hh:mm:ss.mmmm' (ODBC canonical format).
> yyyy-mm-dd hh:mi:ss.mmm(24h)
>
> ODBC canonical (with milliseconds) default for time, date, datetime2, and datetimeoffset
See the "Remarks" section for date and time styles here: <https://msdn.microsoft.com/en-GB/library/ms187928.aspx>
So - assuming your "myDate" field is a datetime or date type - your SQL statement can be as simple as:
```
SELECT mydate
FROM Tble_xxx
WHERE mydate BETWEEN '2015-01-08 12:00:00' AND '2015-04-06 12:00:00'
```
If, as you mention in a comment to one of the other answers, your dates are already being passed in as datetime parameters, you can just use:
```
SELECT mydate
FROM Tble_xxx
WHERE mydate BETWEEN @startdate AND @enddate
```
|
error Display Datetime in Specific Format
|
[
"",
"sql",
"sql-server",
""
] |
I'm trying to join two tables like this:
Table A
```
ID Value1
1 A
2 B
3 C
```
Table B
```
ID Value2
1 A
3 B
4 C
```
Result should be:
```
ID Value1 Value2
1 A A
2 B null
3 C B
4 null C
```
I.e. join Table A to Table B on ID. If ID doesn't exist in Table A, add the ID from Table B.
The closest I've come is:
```
SELECT
a.ID, a.Value1, b.Value2
FROM
TableA a
OUTER JOIN
TableB b ON a.ID = b.ID
```
That gives me the new rows from TableB, but the ID is null.
How can I accomplish this?
|
You are very close, you just need a little push in the right direction:
```
SELECT COALESCE(a.ID, B.ID) As ID, a.Value1, b.Value2
FROM TableA a
FULL OUTER JOIN TableB b ON a.ID=b.ID
```
The `COALESCE` function returns the first parameter it gets that is not null. since this is a full outer join, `a.id` will be null on one row and `b.id` would be null on a different row.
|
Try this:
```
SELECT *
FROM TableA A
FULL OUTER JOIN TableB B
ON A.ID = B.ID;
```
Just a note: you should not name your tables in SQL with spaces in them.
|
SQL - not sure how to join tables
|
[
"",
"sql",
"oracle",
""
] |
I have an app that cannot display any year past 2016 in a drop down.
At the same time, and as time goes on, I need to display past years.
For example:
* in the year 2017, I will need to display 2016 and 2017.
* In the year 2018, I will need to display 2016, 2017, and 2018.
* And so on
So I have developed the following code to accomplish this.
Do you know of a better way to accomplish this in TSQL?
```
SELECT [Year] FROM (
SELECT YEAR(GETDATE()) AS 'Year'
UNION
SELECT YEAR(DATEADD(year,-1,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2017
UNION
SELECT YEAR(DATEADD(year,-2,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2018
UNION
SELECT YEAR(DATEADD(year,-3,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2019
UNION
SELECT YEAR(DATEADD(year,-4,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2020
UNION
SELECT YEAR(DATEADD(year,-5,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2021
UNION
SELECT YEAR(DATEADD(year,-6,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2022
UNION
SELECT YEAR(DATEADD(year,-7,GETDATE())) AS 'Year' WHERE YEAR(GETDATE()) > 2023
) x
ORDER BY [Year] DESC
```
|
Try this, this will generate a list of Year from 2016 to current year
```
WITH CTE_TEST AS(
SELECT 2016 AS NYEAR
UNION ALL
SELECT NYEAR+1
FROM CTE_TEST
WHERE NYEAR+1 <= YEAR(GETDATE())
)
SELECT * FROM CTE_TEST
```
|
Create a Year table that has all valid years you ever want the application to handle.
```
Select Year From Year
Where Year>=2016
AND Year<=DATEPART(Year,GETDATE())
```
|
Display year in the future
|
[
"",
"sql",
"sql-server",
""
] |
This is related to:
[Why is selecting specified columns, and all, wrong in Oracle SQL?](https://stackoverflow.com/questions/2315295/why-is-selecting-specified-columns-and-all-wrong-in-oracle-sql)
The query:
```
select is_parent, animals.* from animals order by is_parent
```
throws the error:
```
[Error] ORA-00960: ambiguous column naming in select list
```
Which makes sense as `is_parent` is duplicated.
Can anyone tell me a simple fix for this, as the original query `select is_parent, animals.* from animals` is very nice and quick to return all other columns. I looked at this [link](http://www.techonthenet.com/oracle/errors/ora00960.php) but the technique to remove the ambiguity does not seem to apply.
|
I have managed to come up with a neat solution thanks to the very helpful comments and answers that say that I will confuse the compiler if I don't use an alias.
I find having to rename the columns cumbersome, for example, if I had selected more columns:
```
select is_parent, age, animals.* from animals order by is_parent, age
```
The quick and dirty solution is to tell Oracle to order using column numbers:
```
select is_parent, age, animals.* from animals order by 1, 2
```
This removes the ambiguity in column names.
|
Something you can try:
1) Use the table name (or its alias) in the ORDER BY clause:
```
SQL> select is_parent, animals.*
2 from animals
3 order by animals.is_parent;
no rows selected
```
2) Write your ordering clause based on the position of fields in your select list:
```
SQL> select is_parent, animals.*
2 from animals
3 order by 1;
no rows selected
```
3) Use an alias for explicitly written columns:
```
SQL> select is_parent as parent, animals.*
2 from animals
3 order by is_parent;
no rows selected
```
Each of these solutions may be more or less readable; the second is the easiest one, and seems to match you need to order by the "most important" fields.
However, I would not recommend any of these in writing an application, using them only for one-shot queries.
|
How to resolve 'ambiguous column naming in select list' when 'select col, t.*' is used with an order by clause
|
[
"",
"sql",
"oracle",
""
] |
My Databases look like so:
**PEAK (NAME, ELEV, DIFF, MAP, REGION)**
**CLIMBER (NAME, SEX)**
**PARTICIPATED (TRIP\_ID, NAME)**
**CLIMBED (TRIP\_ID, PEAK, WHEN)**
* PEAK gives info about the mountain peaks that the user is interested in. The table lists the name of each peak, its elevation (in ft), its difficulty level (on a scale of 1-5), the map that it is located on, and the region of the Sierra Nevada that it is located in.
* CLIMBER lists the members of club, and gives their name and gender.
* PARTICIPATED gives the set of climbers who participated in each of the various climbing trips. The number of participants in each trip varies.
* CLIMBED tells which peaks were climbed on each climbing trip, along w/ the date that each peak was climbed.
I need help w/ writing an SQL query for these 2 example scenarios:
* Which peaks have been climbed by Mark and Mary?
* On which trips did the total elevation gained by all participants exceed 500,000 feet?
This is what I have for the first query:
```
SELECT PEAK
FROM CLIMBED
WHERE TRIP_ID IN
(SELECT TRIP_ID
FROM PARTICIPATED
WHERE NAME IN ('MARK','MARY')
GROUP BY TRIP_ID
HAVING COUNT(*) = 2
);
```
The problem w/ this query is it only gives me all of the peaks that Mark and Mary have climbed during the same trip they took together. I need to somehow get the peaks that they both have climbed, but that they weren't together for.
For the second query I have no clue how to get the COUNT() of each peak that all the participants climbed during the specific TRIP\_ID.
|
For your first question, you should just be able to remove the Having clause to get climbs where either Mark or Mary participated.
```
SELECT PEAK
FROM CLIMBED
WHERE TRIP_ID IN
(SELECT TRIP_ID
FROM PARTICIPATED
WHERE NAME IN ('MARK','MARY')
GROUP BY TRIP_ID
);
```
Leaving the Having clause there means that you need both Mark and Mary to be in the Participated table for a particular trip for the trip\_id to give you the 2 rows mandated by the having clause.
To get the peaks where one has climbed, but not the other, use your original query, but change the having clause to be 1:
```
SELECT PEAK
FROM CLIMBED
WHERE TRIP_ID IN
(SELECT TRIP_ID
FROM PARTICIPATED
WHERE NAME IN ('MARK','MARY')
GROUP BY TRIP_ID
HAVING COUNT(*) = 1
);
```
This works because given the where condition, Count(\*) will be:
* 0 if nither of them climbed (hypothetical - the where condition will not allow this row to show)
* 1 if one of them climbed
* 2 if both of them climbed
Having clauses limit queries based on conditions after grouping, so usually are based on aggregates like Count(\*), which will give you the number of records that are "contained" within each grouping.
The second question is a little tougher, but if I understand it correctly you should be able to use something like this:
```
SELECT climbed.trip_id, sum(peak.elev)
FROM climbed LEFT JOIN participated ON climbed.trip_id = participated.trip_id
LEFT JOIN peak ON climbed.peak = peak.name
GROUP BY climbed.trip_id
HAVING sum(peak.elev) > 500000;
```
This works because using the left join, the elevation for each climber is duplicated; then when you sum for each trip, it adds the elevation for each climber.
|
```
This is for your first query :
Select c.NAME
from PARTICIPATED a
// join with Climbed to get only peak based trips
inner join CLIMBED b
on a.TRIP_ID=b.TRIP_ID
// join with peak to ge the name of peak
inner join PEAK c
on c.NAME=b.PEAK
// on the result set, filter the results for specific persons only
where a.Name in ('Mary','Mark')
```
|
SQL Query - 2 queries involving COUNT() and the owner's of a certain trip
|
[
"",
"sql",
"count",
"oracle-xe",
""
] |
This for HackerRank Weather Observation 5 problem on databases (<https://www.hackerrank.com/challenges/weather-observation-station-5>). How would I solve this?
> Query the two cities in STATION with the shortest and longest CITY
> names, as well as their respective lengths (i.e.: number of characters
> in the name). If there is more than one smallest or largest city,
> choose the one that comes first when ordered alphabetically.
This is what I have so far
```
SELECT CITY, MAX LENGTH(CITY) FROM STATION;
```
But it obviously doesn't work.
|
Here is a solution with window functions:
```
select city, length(city)
from
(
select
city,
row_number() over (order by length(city), city) as shortest_is_one,
row_number() over (order by length(city) desc, city) as longest_is_one
from station
)
where shortest_is_one = 1 or longest_is_one = 1;
```
An alternative would be a `UNION` query with `FETCH FIRST 1 ROW ONLY`:
```
(
select city, length(city)
from station
order by length(city), city
fetch first 1 row only
)
union
(
select city, length(city)
from station
order by length(city) desc, city
fetch first 1 row only
);
```
And here is still another query, again using `UNION`:
```
select city, length(city)
from
(
select max(city) keep (dense_rank first order by length(city), city) as city
from station
union
select max(city) keep (dense_rank first order by length(city) desc, city) as city
from station
);
```
|
In a single table scan:
```
SELECT MIN( city ) KEEP ( DENSE_RANK FIRST ORDER BY LENGTH( city ) ) AS shortest_city,
MIN( city ) KEEP ( DENSE_RANK LAST ORDER BY LENGTH( city ) ) AS longest_city,
LENGTH( MIN( city ) KEEP ( DENSE_RANK FIRST ORDER BY LENGTH( city ) ) )
AS shortest_length,
LENGTH( MIN( city ) KEEP ( DENSE_RANK LAST ORDER BY LENGTH( city ) ) )
AS longest_length
FROM station;
```
**Explanation**
When using `... KEEP ( DENSE_RANK [FIRST|LAST] ORDER BY ...)` you can read from right-to-left to evaluate what it is doing.
So for:
```
MIN( city ) KEEP ( DENSE_RANK FIRST ORDER BY LENGTH( city ) )
```
Will:
1. The `ORDER BY` clause orders the results by the length of the city; then
2. The `KEEP ( DENSE_RANK FIRST` clause keeps only the result that is ranked first in the order (and if there are multiple results which are ranked equally first in the order then it will keep all those that are joint first); and, finally,
3. `MIN( city )` will return the minimum value for `city` (i.e. the alphabetically first name) out of the results which where kept (i.e. only those with the shortest length).
|
Finding max length of a string + which string it is in Oracle SQL
|
[
"",
"sql",
"oracle",
""
] |
I don't know SQL, but I need to use it for PHP, and I have a problem. When I try to create this table:
```
CREATE TABLE logs (
userbeinglogged VARCHAR(255) NOT NULL,
action_location VARCHAR(255) NOT NULL,
log_date DATE('YYYY-MM-DD') NOT NULL,
log_time TIME('00:00:00') NOT NULL,
);
```
At the last line, I get the error `"A closing bracket was expected (near')')"`.
I don't see the problem. Also, I'm pretty sure the `DATE` and `TIME` functions are incorrect, so if you could help me out with the syntax of those that would help too.
|
Remove your comma, right before the closing parenthesis.
And for advice just use `log_Date DATETIME`
|
Remove the last comma. Looks like it has an extra.
```
CREATE TABLE logs ( userbeinglogged VARCHAR(255) NOT NULL, action_location VARCHAR(255) NOT NULL, log_date DATE('YYYY-MM-DD') NOT NULL, log_time TIME('00:00:00') NOT NULL);
```
|
MySQL error doesn't make sense?
|
[
"",
"mysql",
"sql",
""
] |
I have a table, containing numbers (phone numbers) and a code (free or not available).
Now, I need to find series, of 30 consecutive numbers, like 079xxx100 - 079xxx130, and all of them to have free status.
Here is an example how my table looks like:
```
CREATE TABLE numere
(
value int,
code varchar(10)
);
INSERT INTO numere (value,code)
Values
(123100, 'free'),
(123101, 'free'),
...
(123107, 'booked'),
(123108, 'free'),
(...
(123130, 'free'),
(123131, 'free'),
...
(123200, 'free'),
(123201, 'free'),
...
(123230, 'free'),
(123231, 'free'),
...
```
I need a SQL query, to get me in this example, the 123200-123230 range (and all next available ranges).
Now, I found an example, doing more or less what I need:
```
select value, code
from numere
where value >= (select a.value
from numere a
left join numere b on a.value < b.value
and b.value < a.value + 30
and b.code = 'free'
where a.code = 'free'
group by a.value
having count(b.value) + 1 = 30)
limit 30
```
but this is returning only the first 30 available numbers, and not within my range (0-30). (and takes 13 minutes to execute, hehe..)
If anyone has an idea, please let me know (I am using SQL Server)
|
This seems like it works in my dataset. Modify the select and see if it works with your table name.
```
DECLARE @numere TABLE
(
value int,
code varchar(10)
);
INSERT INTO @numere (value,code) SELECT 123100, 'free'
WHILE (SELECT COUNT(*) FROM @numere)<=30
BEGIN
INSERT INTO @numere (value,code) SELECT MAX(value)+1, 'free' FROM @numere
END
UPDATE @numere
SET code='booked'
WHERE value=123105
select *
from @numere n1
inner join @numere n2 ON n1.value=n2.value-30
AND n1.code='free'
AND n2.code='free'
LEFT JOIN @numere n3 ON n3.value>=n1.value
AND n3.value<=n2.value
AND n3.code<>'free'
WHERE n3.value IS NULL
```
|
This is the usual Islands and Gaps problem.
```
; with cte as
(
select *, grp = row_number() over (order by value)
- row_number() over (partition by code order by value)
from numere
),
grp as
(
select grp
from cte
group by grp
having count(*) >= 30
)
select c.grp, c.value, c.code
from grp g
inner join cte c on g.grp = c.grp
```
|
Find consecutive free numbers in table
|
[
"",
"sql",
"sql-server",
"gaps-and-islands",
""
] |
I want to generate a list of hours between two hours with an interval of 30 minutes.
For example an employee enters work at 09:00 and leaves at 18:00, so I want to generate this:
```
Hours
-----
09:00
09:30
10:00
10:30
11:00
11:30
12:00
12:30
13:00
13:30
14:00
14:30
15:00
15:30
16:00
16:30
17:00
17:30
18:00
```
How can I generate this? Thanks.
|
Well using recursive CTE, you can achieve this result.
Try below query -
```
DECLARE @timeFrom TIME = '09:00'
DECLARE @timeTo TIME = '18:00'
;with SourceHrs
as
(
select @timeFrom as [Hours]
UNION ALL
SELECT DATEADD(MINUTE, 30, [Hours]) from SourceHrs WHERE [Hours] < @timeTo
)
SELECT CONVERT(VARCHAR(5),Hours,108) FROM SourceHrs
```
**Result**
```
Hours
-------
09:00
09:30
10:00
10:30
11:00
11:30
12:00
12:30
13:00
13:30
14:00
14:30
15:00
15:30
16:00
16:30
17:00
17:30
18:00
```
|
This will give you what you need, using a tally is faster than recursive:
```
DECLARE @from time = '09:00'
DECLARE @to time = '09:00'
IF @from <= @to
WITH N(N)AS
(SELECT 1 FROM(VALUES(1),(1),(1),(1),(1),(1),(1))M(N)),
tally(N)AS(SELECT ROW_NUMBER()OVER(ORDER BY N.N)FROM N,N a)
SELECT top (datediff(minute, @from, @to)/ 30 + 1 )
LEFT(dateadd(minute, (N - 1 )*30, @from), 5)
FROM tally
```
|
How to generate hours between two hours in SQL Server?
|
[
"",
"sql",
"sql-server",
"time",
"hour",
""
] |
I'm working out of VB6 with SQL SERVER 2012. I found myself in a pickle. Basically i have a query that works fine and pulls the necessary data in SQL SERVER, however, I'm having a difficult time translating it to vb6 SQL code. Here's a working query in SQL SERVER...
```
SELECT 'TotalSum' = SUM(Units)
FROM tblDetail
WHERE MemberID = '117'
AND CAST(SStartD AS DATETIME) >= '4/1/2016'
AND CAST(SStartD AS DATETIME) <= '4/7/2016'
AND Service = 166
AND [CODE] IN('1919')
AND NOT(InvoiceNo = '11880'
AND DtlNo = 2
)
AND NOT(InvoiceNo = '11880'
AND AdjNo = 2
);
```
So when I try to write it in my vb6 application i do something like
```
SELECT 'TotalSum' = SUM(Units)
FROM tblDetail
WHERE MemberID = '117'
AND CAST(SStartD AS DATETIME) >= '4/1/2016'
AND CAST(SStartD AS DATETIME) <= '4/7/2016'
AND Service = 166
AND [CODE] IN('1919')
AND (InvoiceNo <> '11880'
AND DtlNo <> 2
)
AND (InvoiceNo <> '11880'
AND AdjNo <> 2
);
```
However, this is not giving me the same results. What's happening is in my last two clauses
```
( InvoiceNo <> '11880' AND DtlNo<> 2) AND (InvoiceNo <> '11880' AND AdjNo <> 2)
```
When I finally run them in SQL SERVER they don't have parentheses, and it's absolutely essential that the 2 separate clauses are in parentheses. Anyone know what I can do? I think my last resort might be to create a stored procedure but I don't really want to do that.
EDIT:
```
g_SQL = "SELECT 'SUM' = SUM(Units) " & _
"FROM tblDetail WHERE " & _
"MemID = " & udtCDtl.Lines(udtCDtlIdx).MemID & " AND " & _
"CAST(SStartD As DateTime) >= '" & StartDate & "' AND " & _
"CAST(SStartD As DateTime) <= '" & DateAdd("d", -1, EndDate) & "' AND " & _
"Service = 166 AND " & _
"[CODE] IN ('1919')) And " & _
("InvoiceNo <> " & InvoiceDtlRS!InvoiceHdrNo & " OR " & _
"DtlNo <> " & (InvoiceDtlRS! InvoiceDtlNo, "")) & " AND " & _
("InvoiceNo <> " & InvoiceDtlRS!InvoiceHdrNo & " OR " & _
"AdjNo <> " & InvoiceDtlRS! InvoiceDtlNo)
```
|
This should work. I'm able to use SQL queries using NOT with ADODB in VB6.
```
g_SQL = "SELECT 'SUM' = SUM(Units) " & _
"FROM tblDetail WHERE " & _
"MemID = " & udtCDtl.Lines(udtCDtlIdx).MemID & " AND " & _
"CAST(SStartD As DateTime) >= '" & StartDate & "' AND " & _
"CAST(SStartD As DateTime) <= '" & DateAdd("d", -1, EndDate) & "' AND " & _
"Service = 166 AND " & _
"[CODE] IN ('1919')) And " & _
"NOT (InvoiceNo = " & InvoiceDtlRS!InvoiceHdrNo & " AND DtlNo = " & InvoiceDtlRS!InvoiceDtlNo & ") AND " & _
"NOT (InvoiceNo = " & InvoiceDtlRS!InvoiceHdrNo & " AND AdjNo = " & InvoiceDtlRS!InvoiceDtlNo & ")"
```
|
Your translation of NOT(InvoiceNo = '11880' AND DtlNo = 2) to (InvoiceNo <> '11880' AND DtlNo <> 2) is incorrect.
In formal logic, !(A & B) is equivalent to (!A or !B), so it should be:
```
(InvoiceNo <> '11880' OR DtlNo <> 2)
```
This is why you're getting different results. However, why not use the original query? There's nothing in VB6 which would prevent it.
EDIT
```
g_SQL = "SELECT 'SUM' = SUM(Units) " & _
"FROM tblDetail WHERE " & _
"MemID = " & udtCDtl.Lines(udtCDtlIdx).MemID & " AND " & _
"CAST(SStartD As DateTime) >= '" & StartDate & "' AND " & _
"CAST(SStartD As DateTime) <= '" & DateAdd("d", -1, EndDate) & "' AND " & _
"Service = 166 AND " & _
"[CODE] IN ('1919')) And " & _
("InvoiceNo <> " & InvoiceDtlRS!InvoiceHdrNo & " OR " & _
"DtlNo <> " & (InvoiceDtlRS! InvoiceDtlNo, "")) & " AND " & _
("InvoiceNo <> " & InvoiceDtlRS!InvoiceHdrNo & " OR " & _
"AdjNo <> " & InvoiceDtlRS! InvoiceDtlNo)
```
You've got a ) in the wrong place twice. Also, the ) on the final line would be a syntax error I think. The last 5 lines should be:
```
"[CODE] IN ('1919') And " & _
("InvoiceNo <> " & InvoiceDtlRS!InvoiceHdrNo & " OR " & _
"DtlNo <> " & (InvoiceDtlRS!InvoiceDtlNo, "") & " AND " & _
("InvoiceNo <> " & InvoiceDtlRS!InvoiceHdrNo & " OR " & _
"AdjNo <> " & InvoiceDtlRS!InvoiceDtlNo & ")"
```
|
SQL query assistance needed with 'NOT'
|
[
"",
"sql",
"sql-server",
"select",
"sql-server-2012",
"vb6",
""
] |
show create table USERS;
And i will get that result .
```
CREATE TABLE `USERS` (
`UR_ID` bigint(20) NOT NULL,
`DEPT_ID` bigint(20) DEFAULT NULL,
`DN_ID` bigint(20) NOT NULL,
`CREATED_BY` varchar(45) NOT NULL,
`LAST_UPDATED_BY` varchar(45) NOT NULL,
`LAST_UPDATED_DT` datetime NOT NULL,
`UR_LOGIN_NAME` varchar(255) NOT NULL,
`TRANS_ID` bigint(20) DEFAULT NULL,
PRIMARY KEY (`UR_ID`),
UNIQUE KEY `Uk11` (`UR_LOGIN_NAME`),
KEY `SYS_C0018877` (`UR_ID`),
KEY `SYS_C0018878` (`DEPT_ID`),
KEY `SYS_C0018879` (`DN_ID`),
**KEY `SYS_C0018880` (`CREATED_BY`),**
KEY `SYS_C0018881` (`LAST_UPDATED_BY`),
KEY `SYS_C0018882` (`LAST_UPDATED_DT`),
KEY `SYS_C0018883` (`UR_LOGIN_NAME`),
CONSTRAINT `fk_USERS_2` FOREIGN KEY (`DN_ID`) REFERENCES `DESIGNATION` (`DN_ID`) ON DELETE NO ACTION ON UPDATE NO ACTION
) ENGINE=InnoDB DEFAULT CHARSET=utf8 |
```
Now i want to drop foreign key CREATED\_BY and i run
**ALTER TABLE USERS DROP FOREIGN KEY SYS\_C0018880;**
ERROR 1025 (HY000): Error on rename of './dbname/USERS' to './dbname/#sql2-3ea-2c' (errno: 152)
Also I have used
**ALTER TABLE USERS DROP FOREIGN KEY CREATED\_BY;**
But i again got same error
Error on rename of './dbname/USERS' to './dbname/#sql2-3ea-2c' (errno: 152)
Please any one share how can i drop this foreign key and if possible please share what is KEY `SYS_C0018880` (`CREATED_BY`) . I knew this key is foreign key because on desc table\_name i got this result;
| DN\_ID | bigint(20) | NO | MUL | NULL | |
**| CREATED\_BY | varchar(45) | NO | MUL | NULL | |**
| LAST\_UPDATED\_BY | varchar(45) | NO | MUL | NULL | |
| LAST\_UPDATED\_DT | datetime | NO | MUL | NULL |
|
The root cause is file(s) #sql2-3ea-2c\* in data directory and/or table with such name in internal InnoDB dictionary. That would prevent any ALTER operation on USERS table.. Search google for 'removing orphaned innodb tables' for instructions reg. that
|
By default MariaDB appends \_ibfk to the name of the foreign keys if you do not specify it.So, please use the following code format to drop foreign keys where you have not specified foreign key name:
```
ALTER TABLE table_name DROP FOREIGN KEY foreign_key_ibfk;
```
|
Can not drop FOREIGN KEY in Maria DB
|
[
"",
"mysql",
"sql",
"database",
"foreign-keys",
"mariadb",
""
] |
I have a system integration project which needs to CRUD from one DB to another. Not especially complicated. However, when it comes to deleting rows which exist in the target but not in the source, I ran into a little trouble. The standard patterns include: LEFT JOIN, NOT EXISTS or NOT IN. I chose the LEFT JOIN. My 'Phone' table uses a composite key, Employee 'Id' and the PhoneType: Work, Home, Mobile, etc. The standard left join will delete ANY target Phone number NOT in the source. This clears out the whole table. **NOTE: I am updating only records which have changed since the last update, NOT the whole target & source**. So, I wrote a fix which I suspect is really poor SQL:
```
-- SOURCE
DECLARE @tmpPhones TABLE(Id varchar(8), PhoneType int, PhoneNumber varchar(30), PRIMARY KEY (Id, PhoneType))
INSERT into @tmpPhones values
('TEST123', 1, '12345678'),
('TEST123', 2, '12345678'),
('TEST123', 3, '12345678')
-- TARGET
DECLARE@Phone TABLE( Id varchar(8), PhoneType int, PhoneNumber varchar(30), PRIMARY KEY (Id, PhoneType))
INSERT into @Phone values
('TEST123', 1, '12345678'), <-- Exists in both, leave
('TEST123', 2, '12345678'), <-- Exists in both, leave
('TEST123', 3, '12345678'), <-- Exists in both, leave
('TEST123', 4, '12345678'), <-- ONLY delete this one!
('TEST456', 2, '12345678'), <-- Ignore this employee Id
('TEST456', 3, '12345678'), ""
('TEST456', 4, '12345678') ""
DELETE p
FROM @Phone p
LEFT JOIN @tmpPhones t
ON t.Id = p.Id AND t.PhoneType = p.PhoneType
WHERE t.Id IS NULL AND t.PhoneType IS NULL
AND p.Id IN (SELECT Id FROM @tmpPhones) <-- a sad hack?
```
This works, but I feel like there is a better way to make sure we are only deleting records for THIS employee, not all the others.
Any suggestions?
|
Use `exists`.
```
DELETE p
FROM @Phone p
where exists (select 1 from @tmpPhones where Id = p.Id)
AND not exists (select 1 from @tmpPhones where PhoneType = p.PhoneType)
```
Edit: Deleting using `cte`.
```
with todelete as (
select id,phonetype from phone
except
select id,phonetype from tmpphones t
where exists (select 1 from phone where id = t.id)
)
delete from phone
where exists (select 1 from todelete where phone.id = id and phone.phonetype = phonetype)
```
|
I think two exists statements pretty much capture the logic as you describe it:
```
DELETE p
FROM @Phone p
WHERE EXISTS (SELECT 1 FROM @tmpPhone t WHERE t.id = p.id) AND
NOT EXISTS (SELECT 1 FROM @tmpPhone t WHERE t.id = p.id AND t.PhoneType = p.PhoneType) ;
```
|
SQL Delete Where Not In with Composite Key
|
[
"",
"sql",
"sql-server",
"sql-server-2012",
""
] |
I'm trying to return the `DISTINCT` IDs from a table if it meets certain criteria.
> 1. An ID must contain CID of 1,26,33,49 (all)
> 2. An ID should NOT contain CID of 38 or 46 or 67
Here's the table.
[](https://i.stack.imgur.com/oUA2F.png)
Here's what I have so far:
```
SELECT DISTINCT(c1.id) FROM cte c1
LEFT JOIN cte c2
ON c1.id = c2.ID
AND c2.CID = 38
where c2.id IS null
```
This only works if I specify a single ID that should not be presented. If I replace this
```
AND c2.CID = 38
```
with
```
AND c2.CID IN (38,46,67)
```
This will not return me the correct ID. Also, if I add a criteria for `c1.CID` to include IDs `1,26,33,49` It will also return an incorrect ID.
Based on this example table, I should only get back ID `12345`.
Here's [SQLFIDDLE.](http://sqlfiddle.com/#!6/9c862/3)
I'm trying to get this output without using a `subquery`.
Any suggestions?
|
[SQL Fiddle](http://sqlfiddle.com/#!6/9c862/9)
Use conditional aggregation.
```
SELECT id
FROM cte
group by id
having sum(case when cid in (1,26,33,49) then 1 else 0 end) = 4
and sum(case when cid in (38,46,67) then -1 else 0 end) = 0
```
|
EXCEPT is opposite to UNION in a manner of speaking.
```
SELECT c1.id
FROM cte c1
WHERE c1.CID IN(1,26,33,49)
GROUP BY c1.id
HAVING COUNT(DISTINCT c1.CID) = 4
EXCEPT
SELECT DISTINCT c1.id
FROM cte c1
WHERE c1.CID IN(38,46,67)
```
|
Return distinct ID based on few criteria
|
[
"",
"sql",
"sql-server-2012",
""
] |
I have a search box for people's names in my application. Candidate's names are stored as firstName and then lastName. When I search the application, the application input submits a call to an ajax function, where I have this piece of code.
```
filters.where = {
$or: ['firstName', 'lastName', 'email'].map((item) =>
({[item]: {[queryClause]: `%${query}%`}}))
};
const scope = req.query.list;
const candidates = await CandidateModel.scope(scope).findAll(filters);
```
Hence if I type in the search box "John", it will find the candidate, and if I type in the word "Smith" it will find the candidate.
The problem is, if I type in the full name "John Smith" it won't come up, because the query is checking to see if "John Smith" equals "John", i.e. the first name, or if "John Smith" equals "Smith", the last name. It doesn't equal either of these.
Is there a way to filter via combined fields in Sequelize, so it tests if the query matches the firstName AND lastName fields combined?
|
Seems like you might have to split the query input and search all fields on the terms passed in. for example:
```
var queryClause ='John Smith';
filters.where = {
$or: _.flatten(_.map(['firstName', 'lastName', 'email'], function(){
return _.map(queryClause.split(' '), function(q){
return {[item]: { $like : '%'+q+'%'}}
})
}))
}
```
which would output something like:
```
{
"where": {
"$or": [{
"firstName": {
"$like": "%John%"
}
}, {
"firstName": {
"$like": "%Smith%"
}
}, {
"lastName": {
"$like": "%John%"
}
}, {
"lastName: {
"$like": "%Smith%"
}
},
{
"email": {
"$like": "%John%"
}
},
{
"email": {
"$like": "%Smith%"
}
}]
}
}
```
--btw using [lodash](https://lodash.com/docs) in the above example code
|
use something like the following to search by full name in this form "firstName lastName"
```
Sequelize.where(Sequelize.fn("concat", Sequelize.col("firstName"), ' ', Sequelize.col("lastName")), {
$ilike: '%john smith%'
})
```
by doing so you fix the issue of firstname or last name might have spaces.
|
Query two combined fields at once in Sequelize
|
[
"",
"sql",
"node.js",
"sequelize.js",
""
] |
I can't seem to figure out the syntax issue here.
This works, but returns nulls;
```
SELECT jo.Job_Operation, jo.Job, jo.Work_Center, jo.Operation_Service, jo.Est_Total_Hrs,
(SELECT SUM(jot.Act_Run_Hrs)
FROM PRODUCTION.dbo.Job_Operation_Time jot
WHERE jot.Job_Operation = jo.Job_Operation) AS Cost
FROM PRODUCTION.dbo.Job_Operation jo
WHERE jo.Job = 'A5076027'
```
So I'm trying to use ISNULL here but I get an error:
```
SELECT jo.Job_Operation, jo.Job, jo.Work_Center, jo.Operation_Service, jo.Est_Total_Hrs,
(ISNULL(SELECT SUM(jot.Act_Run_Hrs)
FROM PRODUCTION.dbo.Job_Operation_Time jot
WHERE jot.Job_Operation = jo.Job_Operation,0)) AS Cost
FROM PRODUCTION.dbo.Job_Operation jo
WHERE jo.Job = 'A5076027'
```
The error is:
Msg 156, Level 15, State 1, Line 2
Incorrect syntax near the keyword 'SELECT'.
Msg 102, Level 15, State 1, Line 4
Incorrect syntax near ','.
Can anyone see what I'm missing here?
Thanks!
|
try this:
```
SELECT jo.Job_Operation, jo.Job, jo.Work_Center, jo.Operation_Service, jo.Est_Total_Hrs,
ISNULL((SELECT SUM(jot.Act_Run_Hrs)
FROM PRODUCTION.dbo.Job_Operation_Time jot
WHERE jot.Job_Operation = jo.Job_Operation),0) AS Cost
FROM PRODUCTION.dbo.Job_Operation jo
WHERE jo.Job = 'A5076027'
```
|
I don't think you need a correlated subquery here. This seems to me like a standard left join is all that is required.
```
SELECT jo.Job_Operation
, jo.Job
, jo.Work_Center
, jo.Operation_Service
, jo.Est_Total_Hrs
, SUM(isnull(jot.Act_Run_Hrs, 0)) AS Cost
FROM PRODUCTION.dbo.Job_Operation jo
left join PRODUCTION.dbo.Job_Operation_Time jot ON jot.Job_Operation = jo.Job_Operation
WHERE jo.Job = 'A5076027'
GROUP BY
jo.Job_Operation
, jo.Job
, jo.Work_Center
, jo.Operation_Service
, jo.Est_Total_Hrs
```
|
ISNULL Syntax Challenge
|
[
"",
"sql",
"sql-server",
""
] |
I have a SQL date field that is stored as `nvarchar(max)`.
Example: `4/7/2016 12:50:03 AM`
I need to convert this to `int`.
I have this: `cast(convert(char(8), DateField,112) as int)`
but I'm getting an error message
> Conversion failed when converting the varchar value '5/22/201' to data type int
Obviously, `/` isn't an `int` character. Any suggestions??
|
I believe if you cast the varchar to a date it will work
cast(convert(char(8),cast(DateField as datetime),112) as int)
|
You can `replace` the slashes with blanks:
```
CAST(REPLACE(CONVERT(char(8),DateField,112),'/','') as int)
```
|
Trying to convert nvarchar stored as datetime to int
|
[
"",
"sql",
"sql-server-2012",
""
] |
I have two tables with a many-to-many association in postgresql. The first table contains activities, which may count zero or more reasons:
```
CREATE TABLE activity (
id integer NOT NULL,
-- other fields removed for readability
);
CREATE TABLE reason (
id varchar(1) NOT NULL,
-- other fields here
);
```
For performing the association, a join table exists *between* those two tables:
```
CREATE TABLE activity_reason (
activity_id integer NOT NULL, -- refers to activity.id
reason_id varchar(1) NOT NULL, -- refers to reason.id
CONSTRAINT activity_reason_activity FOREIGN KEY (activity_id) REFERENCES activity (id),
CONSTRAINT activity_reason_reason FOREIGN KEY (reason_id) REFERENCES reason (id)
);
```
I would like to count the possible association between activities and reasons. Supposing I have those records in the table `activity_reason`:
```
+--------------+------------+
| activity_id | reason_id |
+--------------+------------+
| 1 | A |
| 1 | B |
| 2 | A |
| 2 | B |
| 3 | A |
| 4 | C |
| 4 | D |
| 4 | E |
+--------------+------------+
```
I should have something like:
```
+-------+---+------+-------+
| count | | | |
+-------+---+------+-------+
| 2 | A | B | NULL |
| 1 | A | NULL | NULL |
| 1 | C | D | E |
+-------+---+------+-------+
```
Or, eventually, something like :
```
+-------+-------+
| count | |
+-------+-------+
| 2 | A,B |
| 1 | A |
| 1 | C,D,E |
+-------+-------+
```
I can't find the SQL query to do this.
|
We need to compare *sorted* lists of reasons to identify equal sets.
```
SELECT count(*) AS ct, reason_list
FROM (
SELECT array_agg(reason_id) AS reason_list
FROM (SELECT * FROM activity_reason ORDER BY activity_id, reason_id) ar1
GROUP BY activity_id
) ar2
GROUP BY reason_list
ORDER BY ct DESC, reason_list;
```
`ORDER BY reason_id` in the innermost subquery would work, too, but adding `activity_id` is typically faster.
And we don't strictly need the innermost subquery at all. This works as well:
```
SELECT count(*) AS ct, reason_list
FROM (
SELECT array_agg(reason_id ORDER BY reason_id) AS reason_list
FROM activity_reason
GROUP BY activity_id
) ar2
GROUP BY reason_list
ORDER BY ct DESC, reason_list;
```
But it's typically slower for processing all or most of the table. [Quoting the manual:](http://www.postgresql.org/docs/current/interactive/functions-aggregate.html)
> Alternatively, supplying the input values from a sorted subquery will usually work.
We *could* use `string_agg()` instead of `array_agg()`, and that would work for your example with `varchar(1)` (which might be more efficient with data type `"char"`, btw). It can fail for longer strings, though. The aggregated value can be ambiguous.
---
If `reason_id` would be an **`integer`** (like it typically is), there is another, faster solution with `sort()` from the additional module [intarray](http://www.postgresql.org/docs/current/interactive/intarray.html):
```
SELECT count(*) AS ct, reason_list
FROM (
SELECT sort(array_agg(reason_id)) AS reason_list
FROM activity_reason2
GROUP BY activity_id
) ar2
GROUP BY reason_list
ORDER BY ct DESC, reason_list;
```
Related, with more explanation:
* [Compare arrays for equality, ignoring order of elements](https://stackoverflow.com/questions/12870105/compare-arrays-for-equality-ignoring-order-of-elements/12870508#12870508)
* [Storing and comparing unique combinations](https://stackoverflow.com/questions/29732650/storing-and-comparing-unique-combinations/29734353#29734353)
|
I think you can get what you want using this query:
```
SELECT count(*) as count, reasons
FROM (
SELECT activity_id, array_agg(reason_id) AS reasons
FROM (
SELECT A.activity_id, AR.reason_id
FROM activity A
LEFT JOIN activity_reason AR ON AR.activity_id = A.activity_id
ORDER BY activity_id, reason_id
) AS ordered_reasons
GROUP BY activity_id
) reason_arrays
GROUP BY reasons
```
First you aggregate all the reasons for an activity into an array for each activity. You have to order the associations first, otherwise ['a','b'] and ['b','a'] will be considered different sets and will have individual counts. You also need to include the join or any activity that doesn't have any reasons won't show up in the result set. I'm not sure if that is desirable or not, I can take it back out if you want activities that don't have a reason to not be included. Then you count the number of activities that have the same sets of reasons.
Here is a [sqlfiddle](http://sqlfiddle.com/#!15/fa811/1) to demonstrate
As mentioned by Gordon Linoff you could also use a string instead of an array. I'm not sure which would be better for performance.
|
Query to count the frequence of many-to-many associations
|
[
"",
"sql",
"arrays",
"postgresql",
"many-to-many",
"aggregate",
""
] |
I have a table of 20,000 records. Each record has a datetime field. I want to select all records where the gap between one record and the subsequent record is more than one hour [condition to be applied on the datetime field].
Can anyone give me the SQL code for this purpose?
regards
KAM
|
This can also be done with a sub query, which should work on **all** DBMS. As gordon said, date/time functions are different in every one.
```
SELECT t.* FROM YourTable t
WHERE t.DateCol + interval '1 hour' < (SELECT min(s.DateCol) FROM YourTable s
WHERE t.ID = s.ID AND s.DateCol > t.DateCol)
```
You can replace this:
```
t.DateCol + interval '1 hour'
```
With one of this so it will work on almost every DBMS:
```
DATE_ADD( t.DateCol, INTERVAL 1 hour)
DATEADD(hour,1,t.DateCol)
```
|
ANSI SQL supports the `lead()` function. However, date/time functions vary by database. The following is the logic you want, although the exact syntax varies, depending on the database:
```
select t.*
from (select t.*,
lead(datetimefield) over (order by datetimefield) as next_datetimefield
from t
) t
where datetimefield + interval '1 hour' < next_datetimefield;
```
Note: In Teradata, the `where` would be:
```
where datetimefield + interval '1' hour < next_datetimefield;
```
|
Difference of datetime column in SQL
|
[
"",
"sql",
"datetime",
""
] |
I try to select all names with "Schmidt". But some names are lower case and some upper case.
I try this :
```
Select * from Account
where name like '%chmidt%'
or name like '%CHMIDT%'
```
But when one letter inside the word is upper case (e.g. SchmidT), the statement doesn't find it.
Does somebody know an easy way to solve this problem?
|
I am guessing that you are using Oracle and not MySQL, because it is case senstive by default. Just use the `upper()` or `lower()` functions:
```
Select *
from Account
where lower(name) like '%chmidt%';
```
I would add an `s` if you want names like "Schmidt":
```
Select a.*
from Account a
where lower(a.name) like '%schmidt%';
```
Note: This will find `hammerschmidt` and similar names. If you want an exact match, remove the wildcards or use `=`.
|
Do a lower case comparison:
```
Select * from Account
where lower(name) like '%chmidt%'
```
|
SQL - Select name, no matter upper or lower case letters
|
[
"",
"sql",
"database",
"oracle",
"plsql",
""
] |
**Task :**
Find country to which maximum of customers belong.
**Query**
```
SELECT country,
count(*)
FROM customers
GROUP BY country
HAVING count(*) =
(SELECT max(max_sal)
FROM
(SELECT count(*) max_sal
FROM customers
GROUP BY country)) ;
```
**Result**:
[](https://i.stack.imgur.com/wtn7s.png)
The Result is correct but i think it is difficult way to write query
**Question :** Is there any simple way to rewrite this query.
|
I might be missing something, but it can be as simple as this:
```
SELECT *
FROM ( SELECT country, COUNT (*) max_sal
FROM customers
GROUP BY country
ORDER BY COUNT (*) DESC)
WHERE ROWNUM <= 1;
```
|
You can use `WITH` clause:
```
WITH
c AS (
SELECT country, Count(1) n
FROM customers
GROUP BY country)
SELECT country, n
FROM c
WHERE n = (SELECT Max(n) FROM c)
```
|
Query simplification Oracle Northwind
|
[
"",
"sql",
"database",
"oracle",
"northwind",
""
] |
Why do I need the SELECT privilege on this:
```
UPDATE Sailors S SET
S.rating = S.rating - 1
```
While I don't need it for this query:
```
UPDATE Sailors S
SET S.rating = 8
```
|
In the first you are selecting
```
= S.rating - 1
```
In the second you are not selecting
```
= 8
```
[sp\_table\_privileges](https://msdn.microsoft.com/en-us/library/ms173835.aspx)
> SELECT = GRANTEE can retrieve data for one or more of the columns.
>
> INSERT = GRANTEE can provide data for new rows for one or more of the columns.
>
> UPDATE = GRANTEE can modify existing data for one or more of the columns.
|
It looks like you're reading from S in the first query (the second S.rating), whereas in the second query you're only ever writing data to S. To read data you will need SELECT permissions.
|
UPDATE and SELECT
|
[
"",
"sql",
"database",
"select",
"updates",
"privileges",
""
] |
I have two tables here:
table Product:
```
βββββββββββββββ¦βββββββββββββ¦βββββββββββββββββββββ
β ProductID β Name β productImageURL β
β ββββββββββββββ¬βββββββββββββ¬βββββββββββββββββββββ£
β 10 β Product 1 β β
β 20 β Product 2 β β
β 30 β Product 3 β β
βββββββββββββββ©βββββββββββββ©βββββββββββββββββββββ
```
and table ProductImage
```
βββββββββββββββ¦ββββββββββββββββ¦βββββββββββββββββββ
β ImageID β ProductID β ImageURL β
β ββββββββββββββ¬ββββββββββββββββ¬βββββββββββββββββββ£
β 1 β 10 β /imageone.jpg β
β 2 β 20 β /imagetwo.jpg β
β 3 β 30 β /imagethree.jpgβ
βββββββββββββββ©ββββββββββββββββ©βββββββββββββββββββ
```
I want to copy the image URL from the ProductImage table to the Product table with the corresponding Product ID. How do I do that in SQL?
|
Use `JOIN` in your `UPDATE`:
```
UPDATE p
SET p.productImageURL = i.ImageURL
FROM Product p
INNER JOIN ProductImage i
ON i.ProductID = p.ProductID
```
|
Same idea as that of Felix but cleaner SQL. Works in MySql.
```
UPDATE Product p, ProductImage i
SET p.productImageURL = i.ImageURL
WHERE i.ProductID = p.ProductID;
```
|
Copy data from one table to another table that has the same id
|
[
"",
"sql",
"sql-server",
""
] |
My sql script is like this :
```
SELECT hotel_code, star FROM hotel WHERE star REGEXP '^[A-Za-z0-9]+$'
```
The result is like this :
<http://snag.gy/kQ7t6.jpg>
I want the result of select the field that contains numbers and letters.
So, the result is like this :
```
3EST
2EST
```
Any solution to solve my problem
Thank you
|
I guess you're trying to get Must alphanumeric values. It can be achieved by following.
```
^([0-9]+[A-Za-z]+[0-9]*)|([A-Za-z]+[0-9]+[A-Za-z]*)$
```
|
The solution seems be like that:
```
'^[A-Za-z]+[0-9]+[A-Za-z0-9]+$|^[0-9]+[A-Za-z]+[A-Za-z0-9]+$'
```
You'll find elements beginning with letters followed by numbers, OR numbers followed by letters, and then containing both.
|
How to select the field that contains numbers and letters?
|
[
"",
"mysql",
"sql",
""
] |
I would like to know which SQL dialect is being used in the snippet. Is it MSSQL, MySQL, PL/SQL or is it invalid SQL?
```
CREATE TABLE ACTable (
id int NOT NULL,
x float NOT NULL,
y float NOT NULL,
z float NOT NULL
)
CONSTRAINT [PK_FTTable] PRIMARY KEY( id )
```
|
The SQL is incorrect but this SQL would work in SQL Server:
```
CREATE TABLE ACTable (
id int NOT NULL,
x float NOT NULL,
y float NOT NULL,
z float NOT NULL
CONSTRAINT [PK_FTTable] PRIMARY KEY( id ))
```
|
This part is OK in almost every rdbms
```
CREATE TABLE ACTable (
id int NOT NULL,
x float NOT NULL,
y float NOT NULL,
z float NOT NULL
)
```
The problem is the constraint, which doesn't look like a valid statement for any DB.
You can try that `CREATE TABLE` on every rdmbs on **[sqlFiddle](http://sqlfiddle.com/)**.
|
Detect SQL dialect
|
[
"",
"sql",
"sql-server",
""
] |
I have select statement like below:
```
select [Fiscal Year], sum([value]),
YEAR(DATEADD(year,-1,[Fiscal Year])) as previous_year
from [table1]
group by [Fiscal Year]
```
How to add after column `previous_year`, `sum([value])` from previous year?
[](https://i.stack.imgur.com/uYOG0.png)
|
Please try the below query for SQL 2008
```
select t1.[Fiscal Year],t1.Value,(t1.[Fiscal Year]-1) [previous_year],t2.Value [previous_value]
from
( select [Fiscal Year], sum([value]) value
from [table1]
group by [Fiscal Year]
)t1
LEFT JOIN
(
select [Fiscal Year], sum([value]) value
from [table1]
group by [Fiscal Year]
)t2
ON t1.[Fiscal Year]=t2.[Fiscal Year]+1
```
**[SQL demo link](http://sqlfiddle.com/#!3/37baa/1)**
|
```
--to prepare the environment, let said this is your table
declare @table table
(
fiscalYr integer,
Value integer,
previousyr integer,
prevValue integer
)
--to prepare the value, let said these are your value
insert into @table values (2014,165,2013,0);
insert into @table values (2015,179,2014,0);
insert into @table values (2016,143,2015,0);
--the SQL
select A.fiscalYr,A.Value,A.previousyr, B.Value
from @table A left join
@table B on B.fiscalYr = A.previousyr
```
**This the answer I got**
fiscalYr| value| PrevYr| Value
2014| 165| 2013| NULL
2015| 179| 2014| 165
2016| 143| 2015| 179
|
Display sum from previous year
|
[
"",
"sql",
"sql-server",
"sql-server-2008",
"datetime",
"sqldatetime",
""
] |
Suppose I have two tables `PO` and `PO_Line` and I want to list all fields from `PO` plus the quantity of rows from `PO_Line` that link back to each row in `PO` I would write a query something like -
```
SELECT
PO.propA,
PO.propB,
PO.propC,
PO.propD,
PO.propE,
...
PO.propY,
COUNT(PO_Line.propA) LINES
FROM
PO
LEFT JOIN
PO_Lines
ON
PO.ID = PO_Lines.PO_ID
```
Obviously this would give an error something along the lines of -
> Column 'PO.propA' is invalid in the select list because it is not contained in either an aggregate function or the GROUP BY clause.
So to get the query to run I will add a `GROUP BY` clause to the end of the query and copy and paste my select lines, like so -
```
SELECT
PO.propA,
PO.propB,
PO.propC,
PO.propD,
PO.propE,
...
PO.propY,
COUNT(PO_Line.propA) LINES
FROM
PO
LEFT JOIN
PO_Lines
ON
PO.ID = PO_Lines.PO_ID
GROUP BY
PO.propA,
PO.propB,
PO.propC,
PO.propD,
PO.propE,
...
PO.propY
```
Which works perfectly however it all feels a little unwieldy, especially if I've named my columns i.e. -
```
SELECT
PO.propA AS 'FIRST PROPERTY',
PO.propB AS 'SECOND PROPERTY',
PO.propC AS 'THIRD PROPERTY',
PO.propD AS 'ANOTHER PROPERTY',
PO.propE AS 'YOU GET THE IDEA',
...
PO.propY
```
and I have to copy/paste the entries from the select clause and then delete the column names.
So my question is - Is there a shorthand method to say group by all non-aggregated entries found in the select clause?
|
I think you just want window functions:
```
SELECT . . .,
COUNT(PO_Line.propA) OVER (PARTITION BY PO.ID) as LINES
FROM PO LEFT JOIN
PO_Lines
ON PO.ID = PO_Lines.PO_ID;
```
|
Reading your query I think you might not need a `GROUP BY` to begin with:
```
SELECT
PO.propA,
PO.propB,
PO.propC,
PO.propD,
PO.propE,
...
PO.propY,
(SELECT COUNT(*) FROM PO_Lines WHERE PO.ID = PO_Lines.PO_ID) LINES
FROM
PO
```
|
Grouping by all non-aggregated columns in MSSQL
|
[
"",
"sql",
"sql-server",
"sql-server-2008",
"t-sql",
""
] |
I have the following SQL query:
```
SELECT SUM(tmp.mval), tmp.timekey FROM
(SELECT teghamas,
MAX(arzheq) as mval,
ceil(UNIX_TIMESTAMP(zhamanak)/(60 * 60)) AS timekey
FROM `masnakcutyun`
LEFT JOIN teghkentron ON `masnakcutyun`.`teghKentronId`=`teghkentron`.`teghKentronId`
WHERE teghkentron.hamaynq="London" group by timekey, teghkentron.teghamas)
AS tmp
GROUP BY tmp.timekey
```
It works fine in phpMyAdmin. But there's a warning there saying:
"This type of clause was previously parsed (near select)".
Can you guess what the problem is? The query executes and returns the expected results.
|
It seems to be a phpMyAdmin parser bug, see [the issue on github](https://github.com/phpmyadmin/phpmyadmin/issues/12080), the query itself is valid.
|
MySQL allow write subquery in `from` clause, but this is know [issue](https://github.com/phpmyadmin/phpmyadmin/issues/12080), you can create view and use it :
```
CREATE VIEW viewname AS (SELECT teghamas,
MAX(arzheq) as mval,
ceil(UNIX_TIMESTAMP(zhamanak)/(60 * 60)) AS timekey
FROM `masnakcutyun`
LEFT JOIN teghkentron ON `masnakcutyun`.`teghKentronId`=`teghkentron`.`teghKentronId`
WHERE teghkentron.hamaynq="London" group by timekey, teghkentron.teghamas) ;
SELECT SUM(mval) as MySum, timekey
FROM viewname
GROUP BY timekey
```
|
MySQL parsing error in phpMyAdmin ("This type of clause was previously parsed")
|
[
"",
"mysql",
"sql",
"select",
"phpmyadmin",
""
] |
This may have been asked and answered before, but I'm having difficulty even phrasing it as a question (hence the title).
I have a database table which is essentially
```
[EventId] INT,
[FirstOccurance_Month] DATETIME,
[LastOccurance_Month] DATETIME
```
With some data similar to this:
```
[EventId] [FirstOccurance_Month] [LastOccurance_Month]
1 2015-11-01 2016-01-01
2 2015-12-01 2016-03-01
3 2016-02-01 2016-02-01
```
What I'm trying to achieve is a SQL statement that will output the EventId as one column and all of the months it occurred in as a second column. So for the data above it would look like this:
```
[EventId] [Month]
1 2015-11-01
1 2015-12-01
1 2016-01-01
2 2015-12-01
2 2016-01-01
2 2016-02-01
2 2016-03-01
3 2016-02-01
```
I have a feeling it's going to involve CROSS APPLY, so I'm heading off to find out what that actually is...
|
You can use a Tally table for this task:
```
;WITH Tally AS (
SELECT ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) - 1 AS i
FROM (VALUES (0), (0), (0), (0), (0)) AS t1(n)
CROSS JOIN (VALUES (0), (0), (0), (0), (0)) AS t2(n)
)
SELECT m.EventId, DATEADD(m, t.i, FirstOccurance_Month) AS [Month]
FROM Tally AS t
INNER JOIN mytable AS m
ON DATEADD(m, t.i, FirstOccurance_Month) <= LastOccurance_Month
ORDER BY m.EventId
```
The Tally table used by the above query contains 25 rows. You can easily adjust it to suit your actual requirement.
[**Demo here**](http://sqlfiddle.com/#!6/e40d9/1)
|
One way is using recursive CTE
```
; with rcte as
(
select EventId, [Month] = FirstOccurance_Month
from yourtable
union all
select t.EventId, [Month] = dateadd(month, 1, r.[Month])
from rcte r
inner join yourtable t on r.EventId = t.EventId
where r.[Month] < t.LastOccurance_Month
)
select *
from rcte
```
|
T-SQL All occurrences in a range
|
[
"",
"sql",
"sql-server",
""
] |
I have the following select statement that returns exactly what I want:
```
DECLARE @result varchar(max) = ''
SELECT @result += (result.Fullname + '<br/>')
FROM (SELECT DISTINCT Fullname
FROM Providers
WHERE Status='A') as result
select substring(@result, 0, len(@result) - 4)
```
The only problem is, I want the output from this query to be displayed as a column entry from a larger select statement.
Eg.
```
SELECT
Column AS [AColumnName],
SELECT @result += (result.Fullname + '<br/>')
FROM (SELECT DISTINCT Fullname
FROM Providers
WHERE Status='A') as result
select substring(@result, 0, len(@result) - 4) as [LenderList]
FROM
Table
```
But I am currently getting the error: Incorrect syntax near the keyword 'SELECT'.
The error pointing to line 4
Any ideas?
|
You need aggregate string concatenation in SQL Server. There are already many answers on the subquery, but to save you the trouble:
```
SELECT Column AS [AColumnName],
STUFF((SELECT DISTINCT '<br/>' + Fullname
FROM Providers
WHERE Status = 'A'
FOR XML PATH (''), TYPE
).value('.', 'varchar(max)'
), 1, 5, ''
) as result
FROM Table;
```
The use of the type is important because your string has special XML characters.
|
Which Database? If you can use for xml, then something like...
```
select substring(a.innards, 0, len(a.innards) - 4) as [LenderList]
from
(
SELECT innards = STUFF(
(SELECT DISTINCT Fullname + '</br>'
FROM Providers
WHERE [Status] = 'A'
FOR XML PATH(''), TYPE).value('.[1]', 'nvarchar(max)')
, 1
, 0
, '')
) a
```
|
Select statement within a select statement
|
[
"",
"sql",
"sql-server",
"select",
""
] |
How do I get all Mondays or Tuesdays of the previous month? I haven't seen any examples of this.
|
```
;WITH CTE (X)
AS
(
SELECT DATEADD(MM,DATEDIFF(MM,0,GETDATE())-1,0)
),
CTE2(N) AS
(
SELECT 0
UNION ALL
SELECT 1+N FROM CTE2 WHERE N< (SELECT DATEDIFF(DD,DATEADD(MM,DATEDIFF(MM,0,GETDATE())-1,0),DATEADD(MM,1,DATEADD(MM,DATEDIFF(MM,0,GETDATE())-1,0))-1))
)
SELECT DATEADD(DD,N,X),DATENAME(DW,DATEADD(DD,N,X)) FROM CTE,CTE2 WHERE DATENAME(DW,DATEADD(DD,N,X)) IN ('Monday','Tuesday')
```
|
You could use:
```
DECLARE @d DATE = GETDATE();
SELECT sub.prev_date
FROM (SELECT @d, MONTH(DATEADD(MM, -1, @d))) AS s(d,m)
CROSS APPLY (
SELECT DATEADD(D, c-1, DATEADD(MM, -1, DATEADD(DD, 1 - DAY(d),d))) AS prev_date
FROM (
VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),
(11),(12),(13),(14),(15),(16),(17),(18),(19),(20),
(21),(22),(23),(24),(25),(26),(27),(28),(29),(30),(31))AS x(c)
) AS sub
WHERE MONTH(sub.prev_date) = s.m
AND DATENAME(dw,sub.prev_date) IN ('Monday','Tuesday');
```
`LiveDemo`
Output:
```
ββββββββββββββ
β prev_date β
β βββββββββββββ£
β 2016-03-01 β
β 2016-03-07 β
β 2016-03-08 β
β 2016-03-14 β
β 2016-03-15 β
β 2016-03-21 β
β 2016-03-22 β
β 2016-03-28 β
β 2016-03-29 β
ββββββββββββββ
```
Warning:
`SQL Server` language should be `English` othewise `DATENAME` will not match.
You could also compare with `DATEPART` `weekday` but then you need to know `SET DATEFIRST` setting.
---
**EDIT:**
A bit shorter:
```
DECLARE @d DATE = '2015-01-01';
SELECT sub.prev_date
FROM (SELECT DATEADD(DD, c - DAY(@d),DATEADD(MM, -1, @d)) AS prev_date
FROM (VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),
(11),(12),(13),(14),(15),(16),(17),(18),(19),(20),
(21),(22),(23),(24),(25),(26),(27),(28),(29),(30),(31))AS x(c)) AS sub
WHERE MONTH(sub.prev_date) = MONTH(DATEADD(MM, -1, @d))
AND DATENAME(dw,sub.prev_date) IN ('Monday','Tuesday');
```
`LiveDemo`
|
How to get all Mondays or Tuesdays of Previous Month
|
[
"",
"sql",
"sql-server",
"sql-server-2008",
"t-sql",
""
] |
I need to copy data from one table to another. The two tables have almost the same structure, but are in different databases.
I tried
```
INSERT INTO db1.public.table2(
id,
name,
adress,
lat,
lng
)
SELECT
id,
name,
adress,
lat
lng
FROM db2.public.table2;
```
When I try this, I get the error "cross database ... not implemented".
|
This is a really straightforward task. Just use dblink for this purpose:
```
INSERT INTO t(a, b, c)
SELECT a, b, c FROM dblink('host=xxx user=xxx password=xxx dbname=xxx', 'SELECT a, b, c FROM t') AS x(a integer, b integer, c integer)
```
If you need to fetch data from external database on a regular basis, it would be wise to define a server and user mapping. Then, you could use shorter statement:
```
dblink('yourdbname', 'your query')
```
|
There's also another way to do it. If the dblink extension is not available, it's possible to copy data directly on the command line, using a pipe connecting standard input and output:
```
psql source_database -c 'COPY table TO stdout' | psql target_database -c 'COPY table FROM stdin'
```
But this is gonna work only in postgres 9.4 or higher
|
PostgreSQL copy/transfer data from one database to another
|
[
"",
"sql",
"postgresql",
"copy",
"cross-database",
""
] |
I want to SUM a lot of rows.
Is it quicker (or better practice, etc) to do Option A or Option B?
**Option A**
```
SELECT
[Person]
SUM([Value]) AS Total
FROM
Database
WHERE
[Value] > 0
GROUP BY
[Person]
```
**Option B**
```
SELECT
[Person]
SUM([Value]) AS Total
FROM
Database
GROUP BY
[Person]
```
So if I have, for Person X:
```
0, 7, 0, 6, 0, 5, 0, 0, 0, 4, 0, 9, 0, 0
```
Option A does:
```
a) Remove zeros
b) 7 + 6 + 5 + 4 + 9
```
Option B does:
```
a) 0 + 7 + 0 + 6 + 0 + 5 + 0 + 0 + 0 + 4 + 0 + 9 + 0 + 0
```
Option A has less summing, because it has fewer records to sum, since I've excluded all the records that have a zero value. But Option B doesn't need a WHERE clause.
Anyone got an idea as to whether either of these are significantly quicker/better than the other? Or is it just something that doesn't matter either way?
Thanks :-)
|
Well, *if you have a filtered index that exactly matches the `where` clause*, and if that index removes a significant amount of data (as in: a good chunk of the data is zeros), then definitely the first... If you *don't* have such an index: then you'll need to test it *on your specific data*, but I would probably expect the unfiltered scenario to be *faster*, as it can do use a range of tricks to do the sum if it doesn't need to do branching etc.
However, the two examples aren't functionally equivalent at the moment (the second includes *negative* values, the first doesn't).
|
Assuming that `Value` is always positive the 2nd query might still return less rows if there's a `Person` with all zeroes.
Otherwise you should simply test actual runtime/CPU on a really large amount of rows.
|
Quicker with SQL to SUM 0 values or exclude them?
|
[
"",
"sql",
"t-sql",
""
] |
I am new to SSRS.
I want to get all the possible data for SSRS subscribed reports, which are available in the ReportServer database.
I have found some queries, but they do not return the proper data. They only work for a single report.
I need list of unique subscription with it's data. If possible stored procedure is preferable.
My query:
```
SELECT
b.name AS JobName
, e.name
, e.path
, d.description
, a.SubscriptionID
, laststatus
, eventtype
, LastRunTime
, date_created
, date_modified
FROM ReportServer.dbo.ReportSchedule a
JOIN msdb.dbo.sysjobs b
ON a.ScheduleID = b.name
JOIN ReportServer.dbo.ReportSchedule c
ON b.name = c.ScheduleID
JOIN ReportServer.dbo.Subscriptions d
ON c.SubscriptionID = d.SubscriptionID
JOIN ReportServer.dbo.Catalog e
ON d.report_oid = e.itemid
WHERE e.name = 'Sales_Report'
```
Thanks in advance.
|
I have same requirement once as like you have now...
See below stored procedure..
```
CREATE PROCEDURE [dbo].[GetSubscriptionData]
AS
BEGIN
SET NOCOUNT ON;
WITH
[Sub_Parameters] AS
(
SELECT [SubscriptionID], [Parameters] = CONVERT(XML,a.[Parameters])
FROM [Subscriptions] a
),
[MySubscriptions] AS
(
SELECT DISTINCT [SubscriptionID], [ParameterName] = QUOTENAME(p.value('(Name)[1]', 'nvarchar(max)')), [ParameterValue] = p.value('(Value)[1]', 'nvarchar(max)')
FROM [Sub_Parameters] a
CROSS APPLY [Parameters].nodes('/ParameterValues/ParameterValue') t(p)
),
[SubscriptionsAnalysis] AS
(
SELECT a.[SubscriptionID], a.[ParameterName], [ParameterValue] =
(
SELECT STUFF((SELECT [ParameterValue] + ', ' as [text()]
FROM [MySubscriptions]
WHERE [SubscriptionID] = a.[SubscriptionID] AND [ParameterName] = a.[ParameterName]
FOR XML PATH('') ),1, 0, '') +''
)
FROM [MySubscriptions] a
GROUP BY a.[SubscriptionID],a.[ParameterName]
)
SELECT
DISTINCT (a.[SubscriptionID]),
c.[UserName] AS Owner,
b.Name,
b.Path,
a.[Locale],
a.[InactiveFlags],
d.[UserName] AS Modified_by,
a.[ModifiedDate],
a.[Description],
a.[LastStatus],
a.[EventType],
a.[LastRunTime],
a.[DeliveryExtension],
a.[Version],
sch.StartDate,
--e.[ParameterName],
--LEFT(e.[ParameterValue],LEN(e.[ParameterValue])-1) as [ParameterValue],
SUBSTRING(b.PATH,2,LEN(b.PATH)-(CHARINDEX('/',REVERSE(b.PATH))+1)) AS ProjectName
FROM
[Subscriptions] a
INNER JOIN [Catalog] AS b ON a.[Report_OID] = b.[ItemID]
Inner Join ReportSchedule as RS on rs.SubscriptionID = a.SubscriptionID
INNER JOIN Schedule AS Sch ON Sch.ScheduleID = rs.ScheduleID
LEFT OUTER JOIN [Users] AS c ON a.[OwnerID] = c.[UserID]
LEFT OUTER JOIN [Users] AS d ON a.MODIFIEDBYID = d.Userid
LEFT OUTER JOIN [SubscriptionsAnalysis] AS e ON a.SubscriptionID = e.SubscriptionID;
END
```
> This is simplified query to get all SSRS Subscriptions
```
SELECT USR.UserName AS SubscriptionOwner
,SUB.ModifiedDate
,SUB.[Description]
,SUB.EventType
,SUB.DeliveryExtension
,SUB.LastStatus
,SUB.LastRunTime
,SCH.NextRunTime
,SCH.Name AS ScheduleName
,CAT.[Path] AS ReportPath
,CAT.[Description] AS ReportDescription
FROM dbo.Subscriptions AS SUB
INNER JOIN dbo.Users AS USR
ON SUB.OwnerID = USR.UserID
INNER JOIN dbo.[Catalog] AS CAT
ON SUB.Report_OID = CAT.ItemID
INNER JOIN dbo.ReportSchedule AS RS
ON SUB.Report_OID = RS.ReportID
AND SUB.SubscriptionID = RS.SubscriptionID
INNER JOIN dbo.Schedule AS SCH
ON RS.ScheduleID = SCH.ScheduleID
ORDER BY USR.UserName, CAT.[Path];
```
if you still have any query, comment it..
|
In case you need to find the sql server agent Job use this updated code
```
SET NOCOUNT ON;
WITH
[Sub_Parameters] AS
(
SELECT [SubscriptionID], [Parameters] = CONVERT(XML,a.[Parameters])
FROM [Subscriptions] a
),
[MySubscriptions] AS
(
SELECT DISTINCT [SubscriptionID], [ParameterName] = QUOTENAME(p.value('(Name)[1]', 'nvarchar(max)')), [ParameterValue] = p.value('(Value)[1]', 'nvarchar(max)')
FROM [Sub_Parameters] a
CROSS APPLY [Parameters].nodes('/ParameterValues/ParameterValue') t(p)
),
[SubscriptionsAnalysis] AS
(
SELECT a.[SubscriptionID], a.[ParameterName], [ParameterValue] =
(
SELECT STUFF((SELECT [ParameterValue] + ', ' as [text()]
FROM [MySubscriptions]
WHERE [SubscriptionID] = a.[SubscriptionID] AND [ParameterName] = a.[ParameterName]
FOR XML PATH('') ),1, 0, '') +''
)
FROM [MySubscriptions] a
GROUP BY a.[SubscriptionID],a.[ParameterName]
)
SELECT
DISTINCT (a.[SubscriptionID]),
j.name AS SQLServerAgentJob,
c.[UserName] AS Owner,
b.Name,
b.Path,
a.[Locale],
a.[InactiveFlags],
d.[UserName] AS Modified_by,
a.[ModifiedDate],
a.[Description],
a.[LastStatus],
a.[EventType],
a.[LastRunTime],
a.[DeliveryExtension],
a.[Version],
sch.StartDate,
--e.[ParameterName],
--LEFT(e.[ParameterValue],LEN(e.[ParameterValue])-1) as [ParameterValue],
SUBSTRING(b.PATH,2,LEN(b.PATH)-(CHARINDEX('/',REVERSE(b.PATH))+1)) AS ProjectName
FROM [Subscriptions] a
INNER JOIN [Catalog] AS b ON a.[Report_OID] = b.[ItemID]
Inner Join ReportSchedule as RS on rs.SubscriptionID = a.SubscriptionID
INNER JOIN Schedule AS Sch ON Sch.ScheduleID = rs.ScheduleID
LEFT OUTER JOIN [Users] AS c ON a.[OwnerID] = c.[UserID]
LEFT OUTER JOIN [Users] AS d ON a.MODIFIEDBYID = d.Userid
LEFT OUTER JOIN [SubscriptionsAnalysis] AS e ON a.SubscriptionID = e.SubscriptionID
LEFT JOIN msdb.dbo.sysobjects so ON rs.ScheduleID= so.name
INNER JOIN msdb.dbo.sysjobs J ON CONVERT( NVARCHAR(128), RS.ScheduleID ) = J.name
INNER JOIN msdb.dbo.sysjobschedules JS ON J.job_id = JS.job_id
```
|
How to get all SSRS Subscription Data using stored procedure?
|
[
"",
"sql",
"sql-server",
"reporting-services",
""
] |
I have an employee saved search that needs to return the internal ID of the {supervisor} field, by default it displays the employee ID (not internal ID) and the supervisors full name.
thanks
|
Please try using formula,set value {supervisor.id}
|
Scroll down to the bottom of the field selection list and find "Supervisor fields...", then select "Internal ID" from the ensuing popup.
This is how you do JOINs in the search UI.
|
NetSuite employee Saved Search needs to return employee superior internal ID
|
[
"",
"sql",
"netsuite",
""
] |
I'm using PL/SQL if that matters.
```
Table = Stuff
ID: FRUIT:
100 Apple
100 Grape
200 Apple
200 Orange
550 Apple
700 Orange
800 Orange
900 Grape
... ...
```
I want to list all of the Apples and their IDs that do NOT share the same ID as Orange. How do I go about doing this?
The output should be:
```
100 Apple
550 Apple
```
|
You can do this with a subquery: you effectively pick all of the IDs for Oranges in the subquery, then pick all of the fruit which are Apples and whose IDs aren't in the subquery. Something like this:
```
SELECT *
FROM stuff
WHERE fruit = 'Apple'
AND ID NOT IN (SELECT ID FROM stuff WHERE fruit = 'Orange')
```
|
Or you could do it using the MINUS set operator:
```
SELECT a.ID, a.FRUIT
FROM STUFF a
WHERE a.FRUIT = 'Apple'
MINUS
SELECT b.ID, 'Apple' AS FRUIT
FROM STUFF b
WHERE b.FRUIT = 'Orange'
```
Best of luck.
|
Simple SQL Query 1
|
[
"",
"sql",
"oracle",
""
] |
I have two tables that have a one-many relationship, and I would like to put together a query that follows a rule to join a particular row in the 'many' table to a row in the 'one' table.
user table:
```
ββββββ¦βββββββββββββββ¦
β id β name β
β βββββ¬βββββββββββββββ¬
β 1 β user 1 β
β 2 β user 2 β
β 3 β user 3 β
β 4 β user 4 β
ββββββ©βββββββββββββββ©
```
Messages table:
```
ββββββ¦βββββββββββββββ¦ββββββββββββ¦ββββββββββ
β id β Text β user_id β date β
β βββββ¬βββββββββββββββ¬ββββββββββββ¬ββββββββββ£
β 1 β Hello β 1 β 3/31 β
β 2 β World β 1 β 4/1 β
β 3 β Test message β 2 β 4/2 β
β 4 β Another test β 3 β 4/4 β
ββββββ©βββββββββββββββ©ββββββββββββ©ββββββββββ
```
I am trying to perform a single join from user to messages to get the most recent message for the user. user 2 would have 'test message', user 3 would have 'another test'. User 1 is the one I cannot figure out - I would like to have one row for user 1 returned 'world', based on the fact that it has the most recent date, but I do not see a join that has the capability to perform filtering on a joined table.
|
Try something like this:
```
SELECT
message_id
, [user_id]
, name
, [Text]
, [date]
FROM
(
SELECT
M.id AS message_id
, U.id AS [user_id]
, name
, [Text]
, [date]
--Rank rows for each users by date
, RANK() OVER(PARTITION BY M.[user_id] ORDER BY [date] DESC, M.id DESC) AS Rnk
FROM
@messages AS M
INNER JOIN
@users AS U
ON M.[user_id] = U.id
) AS Tmp
WHERE
--The latest date
Tmp.Rnk = 1
```
This code work in SQL Server 2012 and newer.
[](https://i.stack.imgur.com/N5m9s.png)
|
You can join the tables and than filter the results:
```
select tbl.name , tbl.Text from
(select User.name,
Messages.Text,
RANK() OVER (PARTITION BY User.name ORDER BY Messages.date desc) AS rank
from User inner join Messages
on User.id = Messages.user_id) as tbl
where rank=1
```
|
SQL: filter a joined table
|
[
"",
"sql",
""
] |
If I want to know for each user how much time they spent on the intranet on a certain day, I can use a custom function - 2 examples:
```
select * from [dbo].[usertime]('2016-04-08')
userid totaltime
-----------------
1 4430
2 11043
5 13045
select * from [dbo].[usertime]('2016-04-09')
userid totaltime
-----------------
1 345
3 12066
9 15344
```
I have no control over the function and can only use its output. The `totaltime` is in seconds.
From another table, I can select the dates in a year:
```
select * from dates;
date
----------
2016-01-01
...
2016-04-08
2016-04-09
```
I would like to run the custom function `usertime` for each `date` in the `dates` table and store the result in a temp table, as follows:
```
userid 2016-01-01 .. 2016-04-08 2016-04-09
----------------------------------------------
1 .. 4430 345
2 .. 11043 0
3 .. 0 12066
5 .. 13045 0
9 .. 0 15344
```
This would require me to call `usertime` in a loop, pseudo:
```
create table #usertime
(
userid int
date date
seconds int
)
select * into #dates from dates;
foreach (#dates as _date)
update #usertime with [dbo].[usertime](_date)
select * from #usertime
userid 2016-01-01 .. 2016-04-08 2016-04-09
----------------------------------------------
1 .. 4430 345
2 .. 11043 0
3 .. 0 12066
5 .. 13045 0
9 .. 0 15344
```
I understand I need dynamic SQL here to loop with a different date every time and `stuff()` to create multiple columns from rows in the resultset coming from `#usertime`. But I do not understand how to use these functionalities. Could anyone help me along?
|
There's no need for any looping (something that should almost always be avoided in SQL).
```
SELECT
T.userid,
D._date,
T.totaltime
FROM
#dates D -- Probably no need for a temporary table either...
CROSS APPLY dbo.usertime(D._date) T
```
If you need to then pivot those results, then you can do that as well.
|
It's easier to use a permanent table for the dynamic table structure due to temp table scoping. If you must use a #usertime temp table for some reason, you'll need to nest dynamic SQL, which is pretty ugly.
Below is an example of how you can pivot the results from rows to columns dynamically.
```
SET NOCOUNT ON;
IF OBJECT_ID(N'dbo.TempUserTime', 'U') IS NOT NULL
DROP TABLE dbo.TempUserTime;
IF OBJECT_ID(N'tempdb..#UnpivitedUserTime', 'U') IS NOT NULL
DROP TABLE #UnpivitedUserTime;
--load temp table with unpivoted data
SELECT date, userid, totaltime
INTO #UnpivitedUserTime
FROM dates
CROSS APPLY dbo.userTime(date)
WHERE date BETWEEN '2016-01-01' AND '2016-04-09';
--create pivot table structure with userid and one column per date
DECLARE @SQL nvarchar(MAX) = 'CREATE TABLE dbo.TempUserTime(userid int NOT NULL';
SELECT @SQL += ',' + QUOTENAME(CONVERT(char(10), date, 121)) + ' int NULL'
FROM dates
WHERE date BETWEEN '2016-01-01' AND '2016-04-09';
SELECT @SQL += ');'
EXEC(@SQL);
--insert a row into pivot table for each user
INSERT INTO dbo.TempUserTime (userid)
SELECT DISTINCT userid FROM #UnpivitedUserTime;
--generate an update statement for each date to update all users
SET @SQL = N'';
SELECT @SQL += N'UPDATE dbo.TempUserTime
SET ' + QUOTENAME(CONVERT(char(10), date, 121)) + N' = (
SELECT totaltime
FROM #UnpivitedUserTime AS u
WHERE
        u.date = ''' + CONVERT(char(10), date, 121) + N'''
AND u.userid = TempUserTime.userid
);
'
FROM dates
CROSS APPLY dbo.userTime(date)
WHERE date BETWEEN '2016-01-01' AND '2016-04-09';
--execute update batch
EXEC(@SQL);
--return results
SELECT *
FROM dbo.TempUserTime
ORDER BY userid;
IF OBJECT_ID(N'dbo.TempUserTime', 'U') IS NOT NULL
DROP TABLE dbo.TempUserTime;
IF OBJECT_ID(N'tempdb..#UnpivitedUserTime', 'U') IS NOT NULL
DROP TABLE #UnpivitedUserTime;
GO
```
|
How do I fill a temp table iteratively and stuff() the result into columns?
|
[
"",
"sql",
"sql-server",
""
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.