Instruction stringlengths 14 778 | input_code stringlengths 0 4.24k | output_code stringlengths 1 5.44k |
|---|---|---|
Add Queries vulnerable to SQL injection | /*
Author: Bert Wagner
Source link: https://blog.bertwagner.com/warning-are-your-queries-vulnerable-to-sql-injection-db914fb39668
*/
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT
ROUTINE_CATALOG
, ROUTINE_SCHEMA
, ROUTINE_NAME
, ROUTINE_TYPE
, ROUTINE_DEFINITION
FROM
INFORMATION_SCHEMA.ROUTINES
WHERE
REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(ROUTINE_DEFINITION,CHAR(0),''),CHAR(9),''),CHAR(10),''),CHAR(11),''),CHAR(12),''),CHAR(13),''),CHAR(14),''),CHAR(160),''),' ','')
LIKE '%+@%'
AND
( -- Only if executes a dynamic string
ROUTINE_DEFINITION LIKE '%EXEC(%'
OR ROUTINE_DEFINITION LIKE '%EXECUTE%'
OR ROUTINE_DEFINITION LIKE '%sp_executesql%'
);
| |
Create function to locate points | -- Locate user defined point on network
-- Author: Christopher Fricke
-- License: MIT
-- DROP FUNCTION routing.get_waypoint(geometry);
CREATE OR REPLACE FUNCTION routing.get_waypoint(test_geom geometry) RETURNS int AS $$
-- Nearest routable network vertex to the supplied point, using the
-- KNN distance operator (<->) so a GiST index on the_geom can be used.
-- Fix: removed the stray double semicolon (";;") after LIMIT 1.
SELECT id::integer FROM routing.ways_vertices_pgr ORDER BY the_geom <-> test_geom LIMIT 1;
$$ LANGUAGE SQL;
| |
Add SQL to move items to archive table | -- Archive anything older than 1 month that is read.
--
-- This is to keep the table used for active interaction smaller and quicker.
--INSERT INTO rss_item_archive
--(id, title, description, link, publication_date, rss_feed_id, create_time,
-- update_time, guid)
--SELECT ri.id, ri.title, ri.description, ri.link, ri.publication_date,
-- ri.rss_feed_id, ri.create_time, ri.update_time, ri.guid
--FROM rss_item ri
--LEFT JOIN rss_item_state ris ON ris.item_id = ri.id
--WHERE ri.publication_date < NOW() - '1 months'::INTERVAL AND
--COALESCE(ris.state, 'unread') = 'read'
--;
-- Delete read items older than one month.
-- The original wrapped an already-correlated subquery in IN(...), which
-- filters on ris.item_id = ri.id twice; EXISTS expresses the intent
-- directly and stops at the first matching state row.
DELETE FROM rss_item ri
WHERE
EXISTS (
SELECT 1
FROM rss_item_state ris
WHERE ris.item_id = ri.id AND
ris.state = 'read'
) AND
ri.publication_date < NOW() - '1 months'::INTERVAL
;
| |
Update SQL file to create schema from ERD. | -- Test Script for Database Migration
CREATE TABLE ACCOUNT (
USER_ID INT PRIMARY KEY,
USER_NAME VARCHAR(100) NOT NULL
);
CREATE TABLE PROJECT (
PROJECT_ID INT PRIMARY KEY,
PROJECT_NAME VARCHAR(100) NOT NULL
);
| CREATE TABLE ACCOUNT(
USER_ID IDENTITY NOT NULL,
USER_NAME VARCHAR(100) NOT NULL,
MAIL_ADDRESS VARCHAR(100) NOT NULL,
PASSWORD VARCHAR(20) NOT NULL,
USER_TYPE INT DEFAULT 0 NOT NULL,
URL VARCHAR(200),
REGISTERED_DATE TIMESTAMP NOT NULL,
UPDATED_DATE TIMESTAMP NOT NULL,
LAST_LOGIN_DATE TIMESTAMP
);
CREATE TABLE PROJECT(
PROJECT_ID IDENTITY NOT NULL,
PROJECT_NAME VARCHAR(100) NOT NULL,
USER_ID INT NOT NULL,
PROJECT_TYPE INT DEFAULT 0 NOT NULL,
DESCRIPTION TEXT(10),
REGISTERED_DATE TIMESTAMP NOT NULL,
UPDATED_DATE TIMESTAMP NOT NULL,
LAST_ACTIVITY_DATE TIMESTAMP NOT NULL
);
CREATE TABLE PROJECT_ACCOUNT(
PROJECT_ID INT NOT NULL,
USER_ID INT NOT NULL
);
ALTER TABLE ACCOUNT ADD CONSTRAINT IDX_ACCOUNT_PK PRIMARY KEY (USER_ID);
ALTER TABLE ACCOUNT ADD CONSTRAINT IDX_ACCOUNT_1 UNIQUE (MAIL_ADDRESS);
ALTER TABLE PROJECT ADD CONSTRAINT IDX_PROJECT_PK PRIMARY KEY (PROJECT_ID);
ALTER TABLE PROJECT ADD CONSTRAINT IDX_PROJECT_1 UNIQUE (PROJECT_NAME, USER_ID);
ALTER TABLE PROJECT_ACCOUNT ADD CONSTRAINT IDX_PROJECT_ACCOUNT_PK PRIMARY KEY (PROJECT_ID, USER_ID);
ALTER TABLE PROJECT_ACCOUNT ADD CONSTRAINT IDX_PROJECT_ACCOUNT_FK0 FOREIGN KEY (PROJECT_ID) REFERENCES PROJECT (PROJECT_ID);
ALTER TABLE PROJECT_ACCOUNT ADD CONSTRAINT IDX_PROJECT_ACCOUNT_FK1 FOREIGN KEY (USER_ID) REFERENCES ACCOUNT (USER_ID);
|
Change users_vocabulary vocabulary_id data type. | # Move current id to hash column
ALTER TABLE `vocabulary` ADD hash BINARY(16) NOT NULL;
UPDATE `vocabulary` SET hash = id;
# Change id to auto-increment int
ALTER TABLE `vocabulary` DROP PRIMARY KEY;
UPDATE vocabulary SET id = 0;
ALTER TABLE `vocabulary` MODIFY id int(11) NOT NULL PRIMARY KEY AUTO_INCREMENT;
# Update users_vocabulary vocabulary_id column
UPDATE `users_vocabulary`
LEFT OUTER JOIN `vocabulary` ON(vocabulary.hash=users_vocabulary.vocabulary_id)
SET vocabulary_id = vocabulary.id;
# Move current id to hash column
ALTER TABLE `vocabulary` ADD hash BINARY(16) NOT NULL;
UPDATE `vocabulary` SET hash = id;
# Change id to auto-increment int
ALTER TABLE `vocabulary` DROP PRIMARY KEY;
UPDATE vocabulary SET id = 0;
ALTER TABLE `vocabulary` MODIFY id int(11) NOT NULL PRIMARY KEY AUTO_INCREMENT;
# Update users_vocabulary vocabulary_id column.
# IMPORTANT: remap via the hash join BEFORE narrowing the column type.
# Converting the BINARY(16) values to int(11) first destroys them, so the
# join on vocabulary.hash = users_vocabulary.vocabulary_id could never match.
UPDATE `users_vocabulary`
LEFT OUTER JOIN `vocabulary` ON(vocabulary.hash=users_vocabulary.vocabulary_id)
SET vocabulary_id = vocabulary.id;
ALTER TABLE `users_vocabulary` MODIFY vocabulary_id int(11) NOT NULL;
|
Remove Process.PgBackendPID as this column is not set anymore. We instead log it per execution to Log. | CREATE TABLE cron.Processes (
ProcessID serial NOT NULL,
JobID integer NOT NULL REFERENCES cron.Jobs(JobID),
Running boolean NOT NULL DEFAULT FALSE,
FirstRunStartedAt timestamptz,
FirstRunFinishedAt timestamptz,
LastRunStartedAt timestamptz,
LastRunFinishedAt timestamptz,
BatchJobState batchjobstate,
LastSQLSTATE text,
LastSQLERRM text,
PgBackendPID integer,
PRIMARY KEY (ProcessID),
CHECK(LastSQLSTATE ~ '^[0-9A-Z]{5}$')
);
ALTER TABLE cron.Processes OWNER TO pgcronjob;
| CREATE TABLE cron.Processes (
ProcessID serial NOT NULL,
JobID integer NOT NULL REFERENCES cron.Jobs(JobID),
Running boolean NOT NULL DEFAULT FALSE,
FirstRunStartedAt timestamptz,
FirstRunFinishedAt timestamptz,
LastRunStartedAt timestamptz,
LastRunFinishedAt timestamptz,
BatchJobState batchjobstate,
LastSQLSTATE text,
LastSQLERRM text,
PRIMARY KEY (ProcessID),
CHECK(LastSQLSTATE ~ '^[0-9A-Z]{5}$')
);
ALTER TABLE cron.Processes OWNER TO pgcronjob;
|
Fix can challenge for performance query | SELECT * FROM users WHERE
(NOT EXISTS (SELECT * FROM challenges WHERE userNamenumber = $1 AND performanceid = $2)) AND
(SELECT voluntary FROM manage AS m WHERE m.usernamenumber = $1 AND m.performanceid = $2 ORDER BY id DESC LIMIT 1) OR
(
SELECT substring($3, 2)::integer > 12 AND
(
NOT EXISTS (SELECT * FROM manage AS m WHERE m.usernamenumber = $1 AND m.performanceid = $2 ORDER BY id DESC LIMIT 1) OR
(SELECT reason = 'Closed Spot' FROM manage AS m WHERE m.usernamenumber = $1 AND m.performanceid = $2 ORDER BY id DESC LIMIT 1)
)
);
| SELECT * FROM users WHERE
NOT EXISTS (SELECT * FROM challenges WHERE userNamenumber = $1 AND performanceid = $2) AND
((SELECT COALESCE(voluntary, true) FROM manage AS m WHERE m.usernamenumber = $1 AND m.performanceid = $2 ORDER BY id DESC LIMIT 1) OR
(
SELECT substring($3, 2)::integer > 12 AND
(
NOT EXISTS (SELECT * FROM manage AS m WHERE m.usernamenumber = $1 AND m.performanceid = $2 ORDER BY id DESC LIMIT 1) OR
(SELECT reason = 'Closed Spot' FROM manage AS m WHERE m.usernamenumber = $1 AND m.performanceid = $2 ORDER BY id DESC LIMIT 1)
)
) AND nameNumber = $1);
|
Add the script to create db with proper encoding | CREATE TABLE "public"."media" (
"id" serial NOT NULL,
"iid" text NOT NULL,
"document" jsonb NOT NULL,
"created_at" timestamp NOT NULL,
PRIMARY KEY ("id")
);
CREATE TABLE "public"."subscriptions" (
"id" serial NOT NULL,
"name" text NOT NULL,
"lat" real NOT NULL,
"long" real NOT NULL,
"radius" int NOT NULL,
PRIMARY KEY ("id")
);
ALTER TABLE "public"."media"
ADD COLUMN "subscription_id" int NOT NULL,
ADD FOREIGN KEY ("subscription_id") REFERENCES "public"."subscriptions"("id"); | --CREATE DATABASE spoto OWNER spoto ENCODING 'UTF8' TEMPLATE template0;
CREATE TABLE "public"."media" (
"id" serial NOT NULL,
"iid" text NOT NULL,
"document" jsonb NOT NULL,
"created_at" timestamp NOT NULL,
PRIMARY KEY ("id")
);
CREATE TABLE "public"."subscriptions" (
"id" serial NOT NULL,
"name" text NOT NULL,
"lat" real NOT NULL,
"long" real NOT NULL,
"radius" int NOT NULL,
PRIMARY KEY ("id")
);
ALTER TABLE "public"."media"
ADD COLUMN "subscription_id" int NOT NULL,
ADD FOREIGN KEY ("subscription_id") REFERENCES "public"."subscriptions"("id"); |
Test the simple MPU query | --drop proc sspBatchMPU -- Monthly Paying User
create proc sspBatchMPU
as
set nocount on
declare @Day30DT datetimeoffset(7)
declare @CurrentDT datetimeoffset(7)
declare @nowdt datetime
declare @MPU bigint
set @nowdt = (select getutcdate())
set @CurrentDT = ((SELECT DATETIMEFROMPARTS (DATEPART(year, @nowdt), DATEPART(month,@nowdt), DATEPART(day, @nowdt), DATEPART(hour, @nowdt), 0, 0, 0 )))
set @Day30DT = (dateadd(day, -30, @CurrentDT))
set @MPU = (select count(*) from MemberItemPurchases where PurchaseDT between @Day30DT and @CurrentDT AND PurchaseCancelYN like 'N')
insert into StatsData(CategoryName, CountNum, Fields, Groups) values('MPU', @MPU, CONVERT(nvarchar(8), GETUTCDATE(), 112), '')
GO | --drop proc sspBatchMPU -- Monthly Paying User
create proc sspBatchMPU
as
set nocount on
declare @Day30DT datetimeoffset(7)
declare @CurrentDT datetimeoffset(7)
declare @nowdt datetime
declare @MPU bigint
set @nowdt = (select getutcdate())
set @CurrentDT = ((SELECT DATETIMEFROMPARTS (DATEPART(year, @nowdt), DATEPART(month,@nowdt), DATEPART(day, @nowdt), DATEPART(hour, @nowdt), 0, 0, 0 )))
set @Day30DT = (dateadd(day, -30, @CurrentDT))
set @MPU = (select count(*) from MemberItemPurchases where PurchaseDT between @Day30DT and @CurrentDT AND PurchaseCancelYN like 'N')
insert into StatsData(CategoryName, CountNum, Fields, Groups) values('MPU', @MPU, CONVERT(nvarchar(8), GETUTCDATE(), 112), '')
GO
------------------------------------------------------------------
-- run test
--exec sspBatchMPU
------------------------------------------------------------------
/*
select * from StatsData order by createdat desc
select * from Members
select * from MemberItemPurchases
select count(*) from MemberItemPurchases where PurchaseDT between '2016-05-15 15:00:03.1749825 +00:00' and sysutcdatetime()
-- test data value
update Members set LastLoginDT = sysutcdatetime() where memberid like 'bbb'
update Members set LastLoginDT = sysutcdatetime() where memberid like 'ccc'
update Members set LastLoginDT = sysutcdatetime() where memberid like 'ddd'
select sysutcdatetime()
select dateadd(day, -30, sysutcdatetime())
select CONVERT(nvarchar(20), getutcdate(), 112)
declare @nowdt datetime
set @nowdt = (select getutcdate())
SELECT DATEPART(year, @nowdt) + '-' + DATEPART(month,@nowdt) + '-' + DATEPART(day, @nowdt);
SELECT convert(datetime, getutcdate(), 121) -- yyyy-mm-dd hh:mm:ss.mmm
*/ |
Adjust a comment line that prevents cloud from deploying | --
-- PostgreSQL database dump
--
CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
CREATE FUNCTION update_updated_at_column() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$;
CREATE TABLE deployment_tracker (
id integer NOT NULL,
dbname text,
deployment_type text NOT NULL,
deployment_name text,
deployment_outcome text,
created_at timestamp without time zone DEFAULT now(),
updated_at timestamp without time zone DEFAULT now(),
is_active boolean DEFAULT true,
deployed_by character varying(32),
deployed_as character varying(32),
reference_url text
);
CREATE SEQUENCE deployment_tracker_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE deployment_tracker_id_seq OWNED BY deployment_tracker.id;
CREATE INDEX index_deployment_on_deployment_name ON deployment_tracker USING btree (deployment_name);
CREATE TRIGGER update_updated_at_modtime BEFORE UPDATE ON deployment_tracker FOR EACH ROW EXECUTE PROCEDURE update_updated_at_column();
ALTER TABLE ONLY deployment_tracker ALTER COLUMN id SET DEFAULT nextval('deployment_tracker_id_seq'::regclass);
--
-- PostgreSQL database dump complete
--
| --
-- PostgreSQL database dump
--
CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
CREATE FUNCTION update_updated_at_column() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$;
CREATE TABLE deployment_tracker (
id integer NOT NULL,
dbname text,
deployment_type text NOT NULL,
deployment_name text,
deployment_outcome text,
created_at timestamp without time zone DEFAULT now(),
updated_at timestamp without time zone DEFAULT now(),
is_active boolean DEFAULT true,
deployed_by character varying(32),
deployed_as character varying(32),
reference_url text
);
CREATE SEQUENCE deployment_tracker_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE deployment_tracker_id_seq OWNED BY deployment_tracker.id;
CREATE INDEX index_deployment_on_deployment_name ON deployment_tracker USING btree (deployment_name);
CREATE TRIGGER update_updated_at_modtime BEFORE UPDATE ON deployment_tracker FOR EACH ROW EXECUTE PROCEDURE update_updated_at_column();
ALTER TABLE ONLY deployment_tracker ALTER COLUMN id SET DEFAULT nextval('deployment_tracker_id_seq'::regclass);
--
-- PostgreSQL database dump complete
--
|
Update migration to add igg | create table sguser (
id serial not null primary key,
email text not null unique,
password text not null,
username text,
credentials text,
challenges text,
emailverified boolean,
verificationtoken boolean,
status text,
modified timestamp with time zone default now(),
realm text,
firstname text,
lastname text,
roles json,
uid text
);
create table accesstoken (
id text not null unique primary key,
ttl integer not null,
created timestamp with time zone default now(),
userid integer not null references sguser(id)
);
create table acl (
id serial not null primary key,
model text,
property text,
accesstype text,
permission text,
principaltype text,
principalid text
);
create table role (
id serial not null primary key,
name text not null unique,
description text,
created timestamp with time zone default now(),
modified timestamp with time zone
);
create table rolemapping (
id serial not null primary key,
principaltype text not null,
principalid text not null,
roleid text not null
);
create function update_modified_column()
returns trigger as $$
begin
NEW.modified = now();
return NEW;
end;
$$ language 'plpgsql';
create trigger update_sguser_modified_column
before update on sguser for each row execute procedure update_modified_column();
| create table sguser (
id serial not null primary key,
email text not null unique,
password text not null,
username text,
credentials text,
challenges text,
emailverified boolean,
verificationtoken boolean,
status text,
modified timestamp with time zone default now(),
realm text,
firstname text,
lastname text,
roles json,
uid text,
igg bigint
);
create table accesstoken (
id text not null unique primary key,
ttl integer not null,
created timestamp with time zone default now(),
userid integer not null references sguser(id)
);
create table acl (
id serial not null primary key,
model text,
property text,
accesstype text,
permission text,
principaltype text,
principalid text
);
create table role (
id serial not null primary key,
name text not null unique,
description text,
created timestamp with time zone default now(),
modified timestamp with time zone
);
create table rolemapping (
id serial not null primary key,
principaltype text not null,
principalid text not null,
roleid text not null
);
create function update_modified_column()
returns trigger as $$
begin
NEW.modified = now();
return NEW;
end;
$$ language 'plpgsql';
create trigger update_sguser_modified_column
before update on sguser for each row execute procedure update_modified_column();
|
Drop database table when running init_b | CREATE TABLE measurements (
id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,
time_stamp DATETIME,
stroom_dal FLOAT,
stroom_piek FLOAT,
stroom_current FLOAT,
gas FLOAT,
diff_stroom_dal FLOAT,
diff_stroom_piek FLOAT,
diff_gas FLOAT
);
-- Recreate the measurements table from scratch on init.
-- IF EXISTS makes the script idempotent: a plain DROP TABLE errors out
-- on the very first run, when the table does not exist yet.
DROP TABLE IF EXISTS measurements;
CREATE TABLE measurements (
id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,
time_stamp DATETIME,
stroom_dal FLOAT,
stroom_piek FLOAT,
stroom_current FLOAT,
gas FLOAT,
diff_stroom_dal FLOAT,
diff_stroom_piek FLOAT,
diff_gas FLOAT
);
|
Add LOGIN permissions to new roles | CREATE ROLE metacpan;
CREATE ROLE "metacpan-api";
-- make things easier for when we're poking around from inside the container
CREATE USER root;
| CREATE ROLE metacpan WITH LOGIN;
CREATE ROLE "metacpan-api" WITH LOGIN;
-- make things easier for when we're poking around from inside the container
CREATE USER root;
|
Update SQL script for reset | update lfv2_account set cup_cagnoute = false, cup_points = 0, cup_rank = 99;
delete from lfv2_cup_vote;
delete from lfv2_cup_match;
update lfv2_cup set name = 'EURO 2016'; | update lfv2_account set cup_cagnoute = false, cup_points = 0, cup_rank = 99, stat_cup_scores = 0, stat_cup_matchs = 0, stat_cup_results = 0;
delete from lfv2_cup_vote;
delete from lfv2_cup_match;
update lfv2_cup set name = 'EURO 2016'; |
Fix typo in db patch | -- Add the ratings column to the event_comments table
ALTER TABLE event_comments ADD COLUMN `rating` int(11) default NULL, ;
-- Increase patch count
INSERT INTO patch_history SET patch_number = 55; | -- Add the ratings column to the event_comments table
ALTER TABLE event_comments ADD COLUMN `rating` int(11) default NULL;
-- Increase patch count
INSERT INTO patch_history SET patch_number = 55; |
Fix long name for TAS in update sql | UPDATE linked_system SET beskrivelse = 'Det Fælles Medicinkort' where kode = 'FMK';
UPDATE linked_system SET beskrivelse = 'Det Danske Vaccinationsregister' where kode = 'DDV';
UPDATE linked_system SET beskrivelse = 'Tilskudsadministration' where kode = 'TAS';
| UPDATE linked_system SET beskrivelse = 'Det Fælles Medicinkort' where kode = 'FMK';
UPDATE linked_system SET beskrivelse = 'Det Danske Vaccinationsregister' where kode = 'DDV';
UPDATE linked_system SET beskrivelse = 'Tilskudsansøgningsservicen' where kode = 'TAS';
|
Use single-quotes around string literals used in SQL statements | R"**(
BEGIN TRANSACTION;
ALTER TABLE trans
ADD comment TEXT DEFAULT "";
UPDATE config
SET value = "1.2"
WHERE key = 'version';
COMMIT;
)**"
| R"**(
BEGIN TRANSACTION;
ALTER TABLE trans
ADD comment TEXT DEFAULT '';
UPDATE config
SET value = '1.2'
WHERE key = 'version';
COMMIT;
)**"
|
Change Robs position to Project Advisor. | update xcolab_StaffMember set role="CCI Associate Director" where id_=149;
update xcolab_StaffMember set role="Project Manager" where id_=150;
update xcolab_StaffMember set categoryId=8, role="" where id_=155;
update xcolab_StaffMember set userId=1003305, photoUrl="" where id_=159;
update xcolab_StaffMember set photoUrl="https://fbcdn-sphotos-e-a.akamaihd.net/hphotos-ak-frc3/v/t1.0-9/559760_529631463785266_1319441340_n.jpg?oh=888f25bc1e9033c472058ca30bee0534&oe=550F9D89&__gda__=1426450045_30abccd54194dc9bd928c74a12dca7fe" where id_=161;
delete from xcolab_StaffMember where id_ = 317;
insert into xcolab_StaffMember (id_,userID, categoryId, firstNames, lastName, url, photoUrl, role, organization, sort) values(317, 1947856, 5,"","","","", "Software Engineer","", 9); | update xcolab_StaffMember set role="Project Advisor" where id_=149;
update xcolab_StaffMember set role="Project Manager" where id_=150;
update xcolab_StaffMember set categoryId=8, role="" where id_=155;
update xcolab_StaffMember set userId=1003305, photoUrl="" where id_=159;
update xcolab_StaffMember set photoUrl="https://fbcdn-sphotos-e-a.akamaihd.net/hphotos-ak-frc3/v/t1.0-9/559760_529631463785266_1319441340_n.jpg?oh=888f25bc1e9033c472058ca30bee0534&oe=550F9D89&__gda__=1426450045_30abccd54194dc9bd928c74a12dca7fe" where id_=161;
delete from xcolab_StaffMember where id_ = 317;
insert into xcolab_StaffMember (id_,userID, categoryId, firstNames, lastName, url, photoUrl, role, organization, sort) values(317, 1947856, 5,"","","","", "Software Engineer","", 9); |
Add settings for has_invitation field of csiprova2 meeting | -- Inizio script
/*!40101 SET character_set_client = latin1 */;
/*!40103 SET TIME_ZONE='+00:00' */;
SET AUTOCOMMIT=0;
START TRANSACTION;
-- Add schedule details to csi prova 2
update meeting_sessions
set warm_up_time = '14:00:00',
begin_time = '14:50:00',
day_part_type_id = (select dpt.id from day_part_types dpt where dpt.code = 'P')
where meeting_id = 14102;
COMMIT;
-- Fine script
| -- Inizio script
/*!40101 SET character_set_client = latin1 */;
/*!40103 SET TIME_ZONE='+00:00' */;
SET AUTOCOMMIT=0;
START TRANSACTION;
-- Add schedule details to csi prova 2
update meeting_sessions
set warm_up_time = '14:00:00',
begin_time = '14:50:00',
day_part_type_id = (select dpt.id from day_part_types dpt where dpt.code = 'P')
where meeting_id = 14102;
-- Set meeting invitation (assumes the invitation file has already been copied into place)
update meetings set has_invitation = true where id = 14102;
COMMIT;
-- Fine script
|
Fix SQL statement to prevent always asking for updates | #
# Table structure for table 'tx_tevglossary_domain_model_entry'
#
CREATE TABLE tx_tevglossary_domain_model_entry (
uid int(11) unsigned NOT NULL auto_increment,
pid int(11) unsigned DEFAULT '0' NOT NULL,
crdate int(11) unsigned DEFAULT '0' NOT NULL,
tstamp int(11) unsigned DEFAULT '0' NOT NULL,
cruser_id int(11) unsigned DEFAULT '0' NOT NULL,
deleted tinyint(4) unsigned DEFAULT '0' NOT NULL,
hidden tinyint(4) unsigned DEFAULT '0' NOT NULL,
term varchar(255) NOT NULL,
definition text NOT NULL,
PRIMARY KEY (uid),
KEY parent (pid),
KEY deleted (deleted),
KEY deleted (hidden)
);
| #
# Table structure for table 'tx_tevglossary_domain_model_entry'
#
CREATE TABLE tx_tevglossary_domain_model_entry (
uid int(11) unsigned NOT NULL auto_increment,
pid int(11) unsigned DEFAULT '0' NOT NULL,
crdate int(11) unsigned DEFAULT '0' NOT NULL,
tstamp int(11) unsigned DEFAULT '0' NOT NULL,
cruser_id int(11) unsigned DEFAULT '0' NOT NULL,
deleted tinyint(4) unsigned DEFAULT '0' NOT NULL,
hidden tinyint(4) unsigned DEFAULT '0' NOT NULL,
term varchar(255) DEFAULT '' NOT NULL,
definition text NOT NULL,
PRIMARY KEY (uid),
KEY parent (pid),
KEY deleted (deleted),
KEY deleted (hidden)
);
|
Add data to populate on re-deploy. | -- Note: When adding new tables, also add the tables to dropSchema.sql and clearSchema in order for tests to work correctly.
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE TABLE IF NOT EXISTS users (
user_id bigserial PRIMARY KEY ,
username text UNIQUE NOT NULL,
email text NOT NULL,
password text NOT NULL,
first_name text NOT NULL,
last_name text NOT NULL
);
CREATE TABLE IF NOT EXISTS groups (
group_id bigserial PRIMARY KEY,
name text NOT NULL,
created_on bigint NOT NULL,
admin bigint REFERENCES users ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS group_contains_user (
group_id bigint NOT NULL REFERENCES groups,
user_id bigint NOT NULL REFERENCES users,
PRIMARY KEY (group_id, user_id)
);
CREATE TABLE IF NOT EXISTS user_invited_to_group (
invite_id bigserial PRIMARY KEY,
user_id bigint NOT NULL references users,
group_id bigint NOT NULL references groups
); | -- Note: When adding new tables, also add the tables to dropSchema.sql and clearSchema in order for tests to work correctly.
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE TABLE IF NOT EXISTS users (
user_id bigserial PRIMARY KEY ,
username text UNIQUE NOT NULL,
email text NOT NULL,
password text NOT NULL,
first_name text NOT NULL,
last_name text NOT NULL
);
CREATE TABLE IF NOT EXISTS groups (
group_id bigserial PRIMARY KEY,
name text NOT NULL,
created_on bigint NOT NULL,
admin bigint REFERENCES users ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS group_contains_user (
group_id bigint NOT NULL REFERENCES groups,
user_id bigint NOT NULL REFERENCES users,
PRIMARY KEY (group_id, user_id)
);
CREATE TABLE IF NOT EXISTS user_invited_to_group (
invite_id bigserial PRIMARY KEY,
user_id bigint NOT NULL references users,
group_id bigint NOT NULL references groups
);
INSERT INTO users(username, email, password, first_name, last_name) values('jeff', 'fake@email.com', 'password', 'Jeff', 'Fennell');
INSERT INTO users(username, email, password, first_name, last_name) values('SomeDude', 'fake123@email.com', 'password', 'The', 'Dude');
INSERT INTO groups(name, created_on, admin) values ('The best group', 1459821220, 1);
INSERT INTO group_contains_user (group_id, user_id) values(1,1);
INSERT INTO group_contains_user (group_id, user_id) values(1,2); |
Revert "Intentionally breaking code to check build failure on CI. <Jaideep>" - verified it :) | --liquibase formatted sql
--changeset JAIDEEP:1
CREATE TABLE PERSON (
ID SERIAL PRIMARY KEY,
FIRST_NAME VARCHAR(130) NOT NULL,
MIDDLE_NAME VARCHAR(130),
LAST_NAME VARCHAR(130),
CATEGORY varchar(200) NOT NULL remove it
);
--rollback drop table PERSON; | --liquibase formatted sql
--changeset JAIDEEP:1
CREATE TABLE PERSON (
ID SERIAL PRIMARY KEY,
FIRST_NAME VARCHAR(130) NOT NULL,
MIDDLE_NAME VARCHAR(130),
LAST_NAME VARCHAR(130),
CATEGORY varchar(200) NOT NULL
);
--rollback drop table PERSON; |
Add updated function to db alter sql | -- These are changes needed to the schema to support moving over to DBIx::Class
begin;
-- table for sessions - needed by Catalyst::Plugin::Session::Store::DBIC
CREATE TABLE sessions (
id CHAR(72) PRIMARY KEY,
session_data TEXT,
expires INTEGER
);
-- users table
create table users (
id serial not null primary key,
email text not null unique,
name text,
phone text,
password text not null default ''
);
--- add PK to contacts table
ALTER TABLE contacts
ADD COLUMN id SERIAL PRIMARY KEY;
AlTER TABLE contacts_history
ADD COLUMN contact_id integer;
update contacts_history
set contact_id = (
select id
from contacts
where contacts_history.category = contacts.category
and contacts_history.area_id = contacts.area_id
);
AlTER TABLE contacts_history
alter COLUMN contact_id SET NOT NULL;
commit;
| -- These are changes needed to the schema to support moving over to DBIx::Class
begin;
-- table for sessions - needed by Catalyst::Plugin::Session::Store::DBIC
CREATE TABLE sessions (
id CHAR(72) PRIMARY KEY,
session_data TEXT,
expires INTEGER
);
-- users table
create table users (
id serial not null primary key,
email text not null unique,
name text,
phone text,
password text not null default ''
);
--- add PK to contacts table
ALTER TABLE contacts
ADD COLUMN id SERIAL PRIMARY KEY;
AlTER TABLE contacts_history
ADD COLUMN contact_id integer;
update contacts_history
set contact_id = (
select id
from contacts
where contacts_history.category = contacts.category
and contacts_history.area_id = contacts.area_id
);
AlTER TABLE contacts_history
alter COLUMN contact_id SET NOT NULL;
create or replace function contacts_updated()
returns trigger as '
begin
insert into contacts_history (contact_id, area_id, category, email, editor, whenedited, note, confirmed, deleted) values (new.id, new.area_id, new.category, new.email, new.editor, new.whenedited, new.note, new.confirmed, new.deleted);
return new;
end;
' language 'plpgsql';
commit;
|
Correct index for identifier search | CREATE INDEX HPA_KA_SEARCH ON IIS.KNOWN_AS
(
PERSON_SURNAME,
PERSON_FORENAME_1,
PERSON_BIRTH_DATE,
FK_PERSON_IDENTIFIER
)
CREATE INDEX HPA_LOL_SEARCH ON IIS.LOSS_OF_LIBERTY
(
INMATE_SURNAME,
INMATE_FORENAME_1,
INMATE_BIRTH_DATE,
FK_PERSON_IDENTIFIER
)
CREATE INDEX HPA_IDENT_SEARCH ON IIS.IIS_IDENTIFIER
(
PERSON_IDENT_TYPE_CODE,
PERSON_IDENTIFIER_VALUE,
FK_PERSON_IDENTIFIER
)
| CREATE INDEX HPA_IDENT_SEARCH ON IIS.IIS_IDENTIFIER
(
PERSON_IDENT_TYPE_CODE,
PERSON_IDENTIFIER_VALUE,
FK_PERSON_IDENTIFIER
)
|
Add DELETE statement to fix broken data in users_history table | ALTER TABLE `users_history`
CHANGE COLUMN `user_id` `user_id` INT (11) NOT NULL,
CHANGE COLUMN `created` `created` DATETIME (6) NOT NULL,
CHANGE COLUMN `modified` `modified` DATETIME (6) NOT NULL,
CHANGE COLUMN `id` `id` INT (10) UNSIGNED NOT NULL AUTO_INCREMENT,
CHANGE COLUMN `email` `email` VARCHAR (75) NOT NULL,
ADD INDEX `users_history_user_id_358ca354_fk_users_id` (`user_id`),
ADD CONSTRAINT `users_history_user_id_358ca354_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`);
| /* There are some old entries in users_history which point to non-existent users, need to delete them to add the missing constraint */
DELETE FROM `users_history` USING `users_history`
LEFT JOIN `users` ON `users_history`.`user_id` = `users`.`id`
WHERE `users`.`id` IS NULL AND `users_history`.`user_id` IS NOT NULL;
ALTER TABLE `users_history`
CHANGE COLUMN `user_id` `user_id` INT (11) NOT NULL,
CHANGE COLUMN `created` `created` DATETIME (6) NOT NULL,
CHANGE COLUMN `modified` `modified` DATETIME (6) NOT NULL,
CHANGE COLUMN `id` `id` INT (10) UNSIGNED NOT NULL AUTO_INCREMENT,
CHANGE COLUMN `email` `email` VARCHAR (75) NOT NULL,
ADD INDEX `users_history_user_id_358ca354_fk_users_id` (`user_id`),
ADD CONSTRAINT `users_history_user_id_358ca354_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`);
|
Trim leading and trailing whitespace and replace blanks with null in ObjectImagesExport view | CREATE OR REPLACE VIEW ObjectImagesExport AS
SELECT
o.ItemType,
i.Accession_Full_ID,
o.ItemName,
ObjectsImagesID,
CONCAT_WS('/', REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(ImageFilePath, '\\', '/'), 'Y:/', ''), 'Y:', ''),
'//Foyer/c/Images Mosaic/', ''), '//Foyer/c/Images Mosaic/', ''),
'//SERVER1/Images Mosaic/', ''), ImageFileName) AS ImagePath,
ImageDescription,
DisplayImage,
DefaultImageIndicator
FROM ObjectsImages i
JOIN Objects o ON (i.Accession_Full_ID = o.Accession_Full_ID)
WHERE TRIM(ImageFileName) <> ''
ORDER BY ObjectsImagesID;
| CREATE OR REPLACE VIEW ObjectImagesExport AS
SELECT
NULLIF(TRIM(o.ItemType), '') AS ItemType,
NULLIF(TRIM(i.Accession_Full_ID), '') AS Accession_Full_ID,
NULLIF(TRIM(o.ItemName), '') AS ItemName,
NULLIF(TRIM(ObjectsImagesID), '') AS ObjectsImagesID,
NULLIF(TRIM(CONCAT_WS('/', REPLACE(
REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(ImageFilePath, '\\', '/'), 'Y:/', ''), 'Y:', ''),
'//Foyer/c/Images Mosaic/', ''), '//Foyer/c/Images Mosaic/', ''),
'//SERVER1/Images Mosaic/', ''), ImageFileName)), '') AS ImagePath,
NULLIF(TRIM(ImageDescription), '') AS ImageDescription,
NULLIF(TRIM(DisplayImage), '') AS DisplayImage,
NULLIF(TRIM(DefaultImageIndicator), '') AS DefaultImageIndicator
FROM ObjectsImages i
JOIN Objects o ON (i.Accession_Full_ID = o.Accession_Full_ID)
WHERE TRIM(ImageFileName) <> ''
ORDER BY ObjectsImagesID;
|
Fix up indices for users_who_share_rooms | /* Copyright 2017 Vector Creations Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- Table keeping track of who shares a room with who. We only keep track
-- of this for local users, so `user_id` is local users only (but we do keep track
-- of which remote users share a room)
CREATE TABLE users_who_share_rooms (
user_id TEXT NOT NULL,
other_user_id TEXT NOT NULL,
room_id TEXT NOT NULL,
share_private BOOLEAN NOT NULL -- is the shared room private? i.e. they share a private room
);
CREATE UNIQUE INDEX users_who_share_rooms_u_idx ON users_who_share_rooms(user_id, other_user_id);
CREATE INDEX users_who_share_rooms_r_idx ON users_who_share_rooms(room_id, user_id);
-- Make sure that we popualte the table initially
UPDATE user_directory_stream_pos SET stream_id = NULL;
| /* Copyright 2017 Vector Creations Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- Table keeping track of who shares a room with who. We only keep track
-- of this for local users, so `user_id` is local users only (but we do keep track
-- of which remote users share a room)
CREATE TABLE users_who_share_rooms (
user_id TEXT NOT NULL,
other_user_id TEXT NOT NULL,
room_id TEXT NOT NULL,
share_private BOOLEAN NOT NULL -- is the shared room private? i.e. they share a private room
);
CREATE UNIQUE INDEX users_who_share_rooms_u_idx ON users_who_share_rooms(user_id, other_user_id);
CREATE INDEX users_who_share_rooms_r_idx ON users_who_share_rooms(room_id);
CREATE INDEX users_who_share_rooms_o_idx ON users_who_share_rooms(other_user_id);
-- Make sure that we popualte the table initially
UPDATE user_directory_stream_pos SET stream_id = NULL;
|
Add default value for user.account_type field to trackit | -- Copyright 2020 MSolution.IO
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
ALTER TABLE user ADD account_type VARCHAR(255) NOT NULL DEFAULT "";
ALTER TABLE user DROP CONSTRAINT unique_email;
ALTER TABLE user ADD CONSTRAINT type_email_unique UNIQUE (email, account_type); | -- Copyright 2020 MSolution.IO
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
ALTER TABLE user ADD account_type VARCHAR(255) NOT NULL DEFAULT "trackit";
ALTER TABLE user DROP CONSTRAINT unique_email;
ALTER TABLE user ADD CONSTRAINT type_email_unique UNIQUE (email, account_type); |
Make table changes remember status before changeing | ALTER TABLE `contest` CHANGE `status` `status` ENUM( 'Open', 'Closed' ) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'Open';
ALTER TABLE `contest` ADD `visibility` ENUM( 'Visible', 'Hidden' ) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'Hidden';
| ALTER TABLE `contest` ADD `visibility` ENUM( 'Visible', 'Hidden' ) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'Hidden';
UPDATE `contest` SET `startDate` = NOW(), `endDate` = NOW() + INTERVAL 3 YEAR WHERE `status` = 'RunningContest';
UPDATE `contest` SET `startDate` = NOW() + INTERVAL 2 YEAR, `endDate` = NOW() + INTERVAL 3 YEAR WHERE `status` = 'FutureContest';
UPDATE `contest` SET `startDate` = NOW() - INTERVAL 2 YEAR, `endDate` = NOW() WHERE `status` = 'PastContest';
UPDATE `contest` SET `visibility` = 'Visible' WHERE `status` != 'Hidden';
ALTER TABLE `contest` CHANGE `status` `status` ENUM( 'Open', 'Closed' ) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'Open';
|
Fix query to count apprenticeships with missing email addresses | CREATE VIEW [DashboardReporting].[ApprenticeshipsWithNoEmail]
AS
SELECT
C.EmployerAndProviderApprovedOn AS ApprovedOn,
A.Id AS ApprenticeshipId
FROM Apprenticeship A
INNER JOIN Commitment C ON A.CommitmentId = C.Id
WHERE A.IsApproved = 1 AND C.EmployerAndProviderApprovedOn >= '2000-09-10' AND A.Email IS NULL
GO | CREATE VIEW [DashboardReporting].[ApprenticeshipsWithNoEmail]
AS
SELECT
C.EmployerAndProviderApprovedOn AS ApprovedOn,
A.Id AS ApprenticeshipId
FROM Apprenticeship A
INNER JOIN Commitment C ON A.CommitmentId = C.Id
WHERE A.IsApproved = 1 AND C.EmployerAndProviderApprovedOn >= '2021-09-10' AND A.Email IS NULL AND A.ContinuationOfId IS NULL
GO |
Remove kv table from postgres ddl script | -- Sample database schema for tourbillon
CREATE TABLE IF NOT EXISTS jobs (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS workflows (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS accounts (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS templates (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS kv (
key VARCHAR PRIMARY KEY,
value TEXT
);
CREATE TABLE IF NOT EXISTS events (
id VARCHAR,
job_id VARCHAR,
"start" BIGINT NOT NULL,
"interval" INT NULL,
data TEXT,
is_expired BOOLEAN NOT NULL DEFAULT false,
PRIMARY KEY (id, job_id, "start")
);
CREATE INDEX events_active_events_start
ON events ("start", is_expired);
| -- Sample database schema for tourbillon
CREATE TABLE IF NOT EXISTS jobs (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS workflows (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS accounts (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS templates (
id VARCHAR PRIMARY KEY,
data TEXT
);
CREATE TABLE IF NOT EXISTS events (
id VARCHAR,
job_id VARCHAR,
"start" BIGINT NOT NULL,
"interval" INT NULL,
data TEXT,
is_expired BOOLEAN NOT NULL DEFAULT false,
PRIMARY KEY (id, job_id, "start")
);
CREATE INDEX events_active_events_start
ON events ("start", is_expired);
|
Simplify best end date query a little | select
min(
ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
ifsapp.work_time_calendar_api.get_end_time(
wc.calendar_id,
greatest(
fc.start_work_day,
ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
trunc(sysdate),
:pre_ccr_buffer
)
),
:total_touch_time
),
:post_ccr_buffer
)
) best_end_date
from finiteload.free_capacity fc
join ifsapp.work_center wc
on fc.contract = wc.contract
and fc.work_center_no = wc.work_center_no
where fc.contract = :contract
and fc.work_center_no = :work_center_no
and fc.finish_work_day >=
ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
trunc(sysdate),
:pre_ccr_buffer
)
and fc.start_work_day <=
ifsapp.work_time_calendar_api.get_start_time(
wc.calendar_id,
fc.finish_work_day,
:total_touch_time
)
and ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
trunc(sysdate),
:pre_ccr_buffer
) <=
ifsapp.work_time_calendar_api.get_start_time(
wc.calendar_id,
fc.finish_work_day,
:total_touch_time
)
| select
min(
ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
ifsapp.work_time_calendar_api.get_end_time(
wc.calendar_id,
greatest(
fc.start_work_day,
ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
trunc(sysdate),
:pre_ccr_buffer
)
),
:total_touch_time
),
:post_ccr_buffer
)
) best_end_date
from finiteload.free_capacity fc
join ifsapp.work_center wc
on fc.contract = wc.contract
and fc.work_center_no = wc.work_center_no
where fc.contract = :contract
and fc.work_center_no = :work_center_no
and fc.capacity_available >= :total_touch_time
and fc.finish_work_day >=
ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
trunc(sysdate),
:pre_ccr_buffer
)
and ifsapp.work_time_calendar_api.get_end_date(
wc.calendar_id,
trunc(sysdate),
:pre_ccr_buffer
) <=
ifsapp.work_time_calendar_api.get_start_time(
wc.calendar_id,
fc.finish_work_day,
:total_touch_time
)
|
Add comment (just a test commit!) | OPTIONS
compile yes
merge trees.all.pak
| OPTIONS
compile yes
merge trees.all.pak
# There's relatively little sense in allowing users to manipulate single trees, usually they are only all deleted for network games.
|
Use REPLACE INTO (maybe the new rows already exist) Removed db specification. | --
-- Time Warsong Gulch
--
INSERT INTO `ascemu_world`.`worldstate_templates` (`map`, `zone`, `field`, `value`) VALUES (489, 3277, 4247, 1);
INSERT INTO `ascemu_world`.`worldstate_templates` (`map`, `zone`, `field`, `value`) VALUES (489, 3277, 4248, 25);
--
-- Update world db version
--
UPDATE `world_db_version` SET `LastUpdate` = '2016-01-30_01_time_warsong' WHERE `LastUpdate` = '2016-01-25_02_loot_creatures';
| --
-- Time Warsong Gulch
--
REPLACE INTO `worldstate_templates` (`map`, `zone`, `field`, `value`) VALUES (489, 3277, 4247, 1);
REPLACE INTO `worldstate_templates` (`map`, `zone`, `field`, `value`) VALUES (489, 3277, 4248, 25);
--
-- Update world db version
--
UPDATE `world_db_version` SET `LastUpdate` = '2016-01-30_01_time_warsong' WHERE `LastUpdate` = '2016-01-25_02_loot_creatures';
|
Add table for export profiles. | CREATE TABLE IF NOT EXISTS lddb (
id text not null unique primary key,
data jsonb not null,
collection text not null,
changedIn text not null,
changedBy text,
checksum text not null,
created timestamp with time zone not null default now(),
modified timestamp with time zone not null default now(),
deleted boolean default false,
depMinModified timestamp with time zone not null default now(),
depMaxModified timestamp with time zone not null default now()
);
CREATE TABLE IF NOT EXISTS lddb__identifiers (
pk serial,
id text not null,
iri text not null,
graphIndex integer,
mainId boolean not null
);
CREATE TABLE IF NOT EXISTS lddb__dependencies (
pk serial,
id text not null,
dependsOnId text not null
);
CREATE TABLE IF NOT EXISTS lddb__versions (
pk serial,
id text not null,
data jsonb not null,
collection text not null,
changedIn text not null,
changedBy text,
checksum text not null,
modified timestamp with time zone not null default now(),
deleted boolean default false,
unique (id, checksum, modified)
);
CREATE TABLE IF NOT EXISTS lddb__settings (
key text not null unique primary key,
settings jsonb not null
);
| CREATE TABLE IF NOT EXISTS lddb (
id text not null unique primary key,
data jsonb not null,
collection text not null,
changedIn text not null,
changedBy text,
checksum text not null,
created timestamp with time zone not null default now(),
modified timestamp with time zone not null default now(),
deleted boolean default false,
depMinModified timestamp with time zone not null default now(),
depMaxModified timestamp with time zone not null default now()
);
CREATE TABLE IF NOT EXISTS lddb__identifiers (
pk serial,
id text not null,
iri text not null,
graphIndex integer,
mainId boolean not null
);
CREATE TABLE IF NOT EXISTS lddb__dependencies (
pk serial,
id text not null,
dependsOnId text not null
);
CREATE TABLE IF NOT EXISTS lddb__versions (
pk serial,
id text not null,
data jsonb not null,
collection text not null,
changedIn text not null,
changedBy text,
checksum text not null,
modified timestamp with time zone not null default now(),
deleted boolean default false,
unique (id, checksum, modified)
);
CREATE TABLE IF NOT EXISTS lddb__settings (
key text not null unique primary key,
settings jsonb not null
);
CREATE TABLE IF NOT EXISTS lddb__profiles (
library_id text not null unique primary key,
profile text not null
);
|
Fix a SQL syntax error | -- avoid innocuous NOTICEs about automatic sequence creation
set client_min_messages='WARNING';
-- Tell psql to stop on an error. Default behavior is to proceed.
\set ON_ERROR_STOP 1
-- Tables for the KM
-- ----------------------------------------------------------------------
DROP TABLE IF EXISTS km_asserted_attribute;
CREATE TABLE km_asserted_attribute (
id SERIAL PRIMARY KEY,
eppn VARCHAR NOT NULL,
name VARCHAR NOT NULL,
value VARCHAR NOT NULL,
asserter_id UUID,
created timestamp DEFAULT NOW() AT TIME ZONE 'UTC'
);
CREATE INDEX km_asserted_attribute_eppn ON km_asserted_attribute (eppn);
| -- avoid innocuous NOTICEs about automatic sequence creation
set client_min_messages='WARNING';
-- Tell psql to stop on an error. Default behavior is to proceed.
\set ON_ERROR_STOP 1
-- Tables for the KM
-- ----------------------------------------------------------------------
DROP TABLE IF EXISTS km_asserted_attribute;
CREATE TABLE km_asserted_attribute (
id SERIAL PRIMARY KEY,
eppn VARCHAR NOT NULL,
name VARCHAR NOT NULL,
value VARCHAR NOT NULL,
asserter_id UUID,
created timestamp DEFAULT (NOW() AT TIME ZONE 'UTC')
);
CREATE INDEX km_asserted_attribute_eppn ON km_asserted_attribute (eppn);
|
Add update user photo sql function. | -- :name create-profile :<! :1
-- :doc Create a new user entry.
insert into users (id, fullname, username, email, password, metadata)
values (:id, :fullname, :username, :email, :password, :metadata)
returning *;
-- :name get-profile :? :1
-- :doc Retrieve the profile data.
select * from users
where id = :id and deleted = false;
-- :name get-profile-by-username :? :1
-- :doc Retrieve the profile data.
select * from users
where (username = :username or email = :username)
and deleted = false;
-- :name update-profile :<! :1
-- :doc Update profile.
update users set username = :username,
email = :email,
fullname = :fullname,
metadata = :metadata
where id = :id and deleted = false
returning *;
-- :name update-profile-password :! :n
-- :doc Update profile password
update users set password = :password
where id = :id and deleted = false;
| -- :name create-profile :<! :1
insert into users (id, fullname, username, email, password, metadata, photo)
values (:id, :fullname, :username, :email, :password, :metadata, '')
returning *;
-- :name get-profile :? :1
select * from users
where id = :id and deleted = false;
-- :name get-profile-by-username :? :1
select * from users
where (username = :username or email = :username)
and deleted = false;
-- :name update-profile :<! :1
update users
set username = :username,
email = :email,
fullname = :fullname,
metadata = :metadata
where id = :id and deleted = false
returning *;
-- :name update-profile-password :! :n
update users set password = :password
where id = :id and deleted = false;
-- :name update-profile-photo :! :n
update users set photo = :photo
where id = :id and deleted = false;
|
Add UUID to signages view | CREATE VIEW {# geotrek.signage #}.v_signages AS (
SELECT e.geom, e.id, t.*
FROM signage_signage AS t, signage_signagetype AS b, core_topology AS e
WHERE t.topo_object_id = e.id AND t.type_id = b.id
AND e.deleted = FALSE
);
| CREATE VIEW {# geotrek.signage #}.v_signages AS (
SELECT e.geom, e.id, e.uuid, t.*
FROM signage_signage AS t, signage_signagetype AS b, core_topology AS e
WHERE t.topo_object_id = e.id AND t.type_id = b.id
AND e.deleted = FALSE
);
|
Make MySQL user passwords match | # Create a MySQL user with limited privileges for backup purposes.
# This isn't strictly necessary on a local machine - you could just use the root
# MySQL user in the backup script.
# ==============================================================================
# Get a MySQL/MariaDB prompt:
mysql -u root -p
# In the MySQL shell enter:
CREATE USER 'backup_user'@'localhost' IDENTIFIED BY 'CiCHVQ2o8TSCq31SbWhQBkmn6';
GRANT SELECT, SHOW VIEW, RELOAD, REPLICATION CLIENT, EVENT, TRIGGER ON *.* TO 'backup_user'@'localhost';
FLUSH PRIVILEGES;
SHOW GRANTS FOR 'backup_user'@'localhost';
| # Create a MySQL user with limited privileges for backup purposes.
# This isn't strictly necessary on a local machine - you could just use the root
# MySQL user in the backup script.
# ==============================================================================
# Get a MySQL/MariaDB prompt:
mysql -u root -p
# In the MySQL shell enter:
CREATE USER 'backup_user'@'localhost' IDENTIFIED BY 'nicelongpassword';
GRANT SELECT, SHOW VIEW, RELOAD, REPLICATION CLIENT, EVENT, TRIGGER ON *.* TO 'backup_user'@'localhost';
FLUSH PRIVILEGES;
SHOW GRANTS FOR 'backup_user'@'localhost';
|
Fix the Oracle upgrade script | ALTER TABLE share ADD latency_threshold NUMBER(*,0);
UPDATE share SET latency_threshold = 20;
COMMIT;
QUIT;
| ALTER TABLE "share" ADD latency_threshold NUMBER(*,0);
UPDATE "share" SET latency_threshold = 20;
COMMIT;
QUIT;
|
Remove the duplicated entry "route.all-resources". | #This contains all the db schema changes that are going to be applied every time the server is deployed
delete from ofProperty where name='provider.lockout.className';
insert into ofProperty values ('provider.lockout.className', 'com.magnet.mmx.lockout.MmxLockoutProvider');
delete from ofProperty where name='xmpp.parser.buffer.size';
insert into ofProperty values ('xmpp.parser.buffer.size', '2097152');
delete from ofProperty where name='xmpp.routing.strict';
insert into ofProperty values('xmpp.routing.strict', 'true');
delete from ofProperty where name='xmpp.client.idle';
insert into ofProperty values('xmpp.client.idle', '-1');
delete from ofProperty where name='xmpp.client.idle.ping';
insert into ofProperty values('xmpp.client.idle.ping', 'false');
delete from ofProperty where name='route.all-resources';
insert into ofProperty values('route.all-resources', 'true');
delete from ofProperty where name='xmpp.proxy.enabled';
insert into ofProperty values('xmpp.proxy.enabled', 'false');
delete from ofProperty where name='xmpp.auth.anonymous';
insert into ofProperty values('xmpp.auth.anonymous', 'false');
delete from ofProperty where name='route.all-resources';
insert into ofProperty values('route.all-resources', 'true');
| #This contains all the db schema changes that are going to be applied every time the server is deployed
delete from ofProperty where name='provider.lockout.className';
insert into ofProperty values ('provider.lockout.className', 'com.magnet.mmx.lockout.MmxLockoutProvider');
delete from ofProperty where name='xmpp.parser.buffer.size';
insert into ofProperty values ('xmpp.parser.buffer.size', '2097152');
delete from ofProperty where name='xmpp.routing.strict';
insert into ofProperty values('xmpp.routing.strict', 'true');
delete from ofProperty where name='xmpp.client.idle';
insert into ofProperty values('xmpp.client.idle', '-1');
delete from ofProperty where name='xmpp.client.idle.ping';
insert into ofProperty values('xmpp.client.idle.ping', 'false');
delete from ofProperty where name='route.all-resources';
insert into ofProperty values('route.all-resources', 'true');
delete from ofProperty where name='xmpp.proxy.enabled';
insert into ofProperty values('xmpp.proxy.enabled', 'false');
delete from ofProperty where name='xmpp.auth.anonymous';
insert into ofProperty values('xmpp.auth.anonymous', 'false');
|
Enlarge SQL fields to more comfortable sizes |
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
CREATE TABLE `issues` (
`id` int(10) UNSIGNED NOT NULL,
`title` varchar(128) NOT NULL,
`description` text,
`scene` varchar(256) NOT NULL,
`state` int(11) NOT NULL,
`category` int(11) NOT NULL,
`position` varchar(48) NOT NULL,
`cameraPosition` varchar(48) NOT NULL,
`cameraOrientation` varchar(48) NOT NULL,
`orthographicSize` float DEFAULT NULL,
`reporter` varchar(64) NOT NULL,
`assignee` varchar(64) DEFAULT NULL,
`screenshot` varchar(64) DEFAULT NULL,
`time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`archived` boolean NOT NULL DEFAULT FALSE
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
--
ALTER TABLE `issues`
ADD PRIMARY KEY (`id`);
ALTER TABLE `issues`
ADD INDEX (`archived`);
--
ALTER TABLE `issues`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT;
|
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
CREATE TABLE `issues` (
`id` int(10) UNSIGNED NOT NULL,
`title` varchar(256) NOT NULL,
`description` text,
`scene` varchar(256) NOT NULL,
`state` int(11) NOT NULL,
`category` int(11) NOT NULL,
`position` varchar(64) NOT NULL,
`cameraPosition` varchar(64) NOT NULL,
`cameraOrientation` varchar(64) NOT NULL,
`orthographicSize` float DEFAULT NULL,
`reporter` varchar(128) NOT NULL,
`assignee` varchar(128) DEFAULT NULL,
`screenshot` varchar(64) DEFAULT NULL,
`time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`archived` boolean NOT NULL DEFAULT FALSE
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
--
ALTER TABLE `issues`
ADD PRIMARY KEY (`id`);
ALTER TABLE `issues`
ADD INDEX (`archived`);
--
ALTER TABLE `issues`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT;
|
Fix the query and test insert Argomenti. | #Create db 'des' (Database-Exercise-System)
CREATE DATABASE IF NOT EXISTS des;
#Create 'argomenti' table
CREATE TABLE IF NOT EXISTS argomenti(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
argomento varchar(255) NOT NULL
);
#Create 'soluzioni' table
CREATE TABLE IF NOT EXISTS soluzioni(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
soluzione varchar(255) NOT NULL
);
#Create 'domandeALG' table
CREATE TABLE IF NOT EXISTS domandeALG(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
testo varchar(255) NOT NULL,
soluzione INT NOT NULL,
argomento INT NOT NULL,
FOREIGN KEY (soluzione) REFERENCES soluzioni(id),
FOREIGN KEY (argomento) REFERENCES argomenti(id)
);
#Create 'domandeSQL' table
CREATE TABLE IF NOT EXISTS domandeSQL(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
testo varchar(255) NOT NULL,
soluzione INT NOT NULL,
argomento INT NOT NULL,
FOREIGN KEY (soluzione) REFERENCES soluzioni(id),
FOREIGN KEY (argomento) REFERENCES argomenti(id)
); | #Create db 'des' (Database-Exercise-System)
CREATE DATABASE IF NOT EXISTS des;
#Create 'argomenti' table
CREATE TABLE IF NOT EXISTS des.argomenti(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
argomento varchar(255) NOT NULL
);
#Create 'soluzioni' table
CREATE TABLE IF NOT EXISTS des.soluzioni(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
soluzione varchar(255) NOT NULL
);
#Create 'domandeALG' table
CREATE TABLE IF NOT EXISTS des.domandeALG(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
testo varchar(255) NOT NULL,
soluzione INT NOT NULL,
argomento INT NOT NULL,
FOREIGN KEY (soluzione) REFERENCES soluzioni(id),
FOREIGN KEY (argomento) REFERENCES argomenti(id)
);
#Create 'domandeSQL' table
CREATE TABLE IF NOT EXISTS des.domandeSQL(
id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
testo varchar(255) NOT NULL,
soluzione INT NOT NULL,
argomento INT NOT NULL,
FOREIGN KEY (soluzione) REFERENCES soluzioni(id),
FOREIGN KEY (argomento) REFERENCES argomenti(id)
);
#Test population argomenti
INSERT INTO des.argomenti(argomento) VALUES ('Argomento1'),('Argomento2'),('Argomento3'),('Argomento4'); |
Fix user ID in migration 32 | ALTER TABLE jam
ADD COLUMN jam_default_icon_url TEXT NULL AFTER jam_colors;
UPDATE jam
SET jam_default_icon_url = "logo.png";
INSERT INTO config
(config_lastedited, config_lasteditedby, config_key, config_value, config_category, config_description, config_type, config_options, config_editable, config_required, config_added_to_dictionary)
VALUES
(Now(), '1', 'DEFAULT_GAME_ICON_URL', 'logo.png', 'NEW_JAM_DEFAULTS', 'URL to the default entry icon for entries without a custom icon', 'TEXT', '[]', '1', '1', '1'); | ALTER TABLE jam
ADD COLUMN jam_default_icon_url TEXT NULL AFTER jam_colors;
UPDATE jam
SET jam_default_icon_url = "logo.png";
INSERT INTO config
(config_lastedited, config_lasteditedby, config_key, config_value, config_category, config_description, config_type, config_options, config_editable, config_required, config_added_to_dictionary)
VALUES
(Now(), '-1', 'DEFAULT_GAME_ICON_URL', 'logo.png', 'NEW_JAM_DEFAULTS', 'URL to the default entry icon for entries without a custom icon', 'TEXT', '[]', '1', '1', '1'); |
Set db default collation & charset | drop database if exists mo_development;
create database mo_development;
drop database if exists mo_test;
create database mo_test;
drop database if exists mo_tmp;
create database mo_tmp;
use mo_tmp;
drop procedure if exists createUser;
delimiter $$
create procedure createUser(username varchar(50), pw varchar(50))
begin
IF (SELECT EXISTS(SELECT 1 FROM `mysql`.`user` WHERE `user` = username)) = 0 THEN
begin
set @sql = CONCAT('CREATE USER ', username, '@\'localhost\' IDENTIFIED BY \'', pw, '\'');
prepare stmt from @sql;
execute stmt;
deallocate prepare stmt;
end;
END IF;
end $$
delimiter ;
call createUser('mo', 'mo');
use mo_test;
drop database mo_tmp;
grant all privileges on mo_development.* to 'mo'@'localhost' with grant option;
grant all privileges on mo_test.* to 'mo'@'localhost' with grant option;
| drop database if exists mo_development;
create database mo_development
DEFAULT CHARACTER SET utf8
DEFAULT COLLATE utf8_general_ci;
drop database if exists mo_test;
create database mo_test
DEFAULT CHARACTER SET utf8
DEFAULT COLLATE utf8_general_ci;
drop database if exists mo_tmp;
create database mo_tmp;
use mo_tmp;
drop procedure if exists createUser;
delimiter $$
create procedure createUser(username varchar(50), pw varchar(50))
begin
IF (SELECT EXISTS(SELECT 1 FROM `mysql`.`user` WHERE `user` = username)) = 0 THEN
begin
set @sql = CONCAT('CREATE USER ', username, '@\'localhost\' IDENTIFIED BY \'', pw, '\'');
prepare stmt from @sql;
execute stmt;
deallocate prepare stmt;
end;
END IF;
end $$
delimiter ;
call createUser('mo', 'mo');
use mo_test;
drop database mo_tmp;
grant all privileges on mo_development.* to 'mo'@'localhost' with grant option;
grant all privileges on mo_test.* to 'mo'@'localhost' with grant option;
|
Add test of name in trend record | BEGIN;
SELECT plan(2);
SELECT trend_directory.create_table_trend_store(
'test1',
'some_entity_type_name',
'900',
ARRAY[
('x', 'integer', 'some column with integer values')
]::trend_directory.trend_descr[]
);
SELECT columns_are(
'trend',
'test1_some_entity_type_name_qtr',
ARRAY[
'entity_id',
'timestamp',
'modified',
'x'
]
);
SELECT trend_directory.alter_trend_name(
table_trend_store,
'x',
'y'
)
FROM trend_directory.table_trend_store;
SELECT columns_are(
'trend',
'test1_some_entity_type_name_qtr',
ARRAY[
'entity_id',
'timestamp',
'modified',
'y'
]
);
SELECT * FROM finish();
ROLLBACK;
| BEGIN;
SELECT plan(3);
SELECT trend_directory.create_table_trend_store(
'test1',
'some_entity_type_name',
'900',
ARRAY[
('x', 'integer', 'some column with integer values')
]::trend_directory.trend_descr[]
);
SELECT columns_are(
'trend',
'test1_some_entity_type_name_qtr',
ARRAY[
'entity_id',
'timestamp',
'modified',
'x'
]
);
SELECT trend_directory.alter_trend_name(
table_trend_store,
'x',
'y'
)
FROM trend_directory.table_trend_store;
SELECT is(name, 'y'::name, 'trend should have new name')
FROM trend_directory.table_trend
JOIN trend_directory.table_trend_store ON table_trend_store.id = table_trend.trend_store_id
WHERE table_trend_store::name = 'test1_some_entity_type_name_qtr';
SELECT columns_are(
'trend',
'test1_some_entity_type_name_qtr',
ARRAY[
'entity_id',
'timestamp',
'modified',
'y'
]
);
SELECT * FROM finish();
ROLLBACK;
|
Add v17 update script to schema creation script | -- Generate core schema
\i schema/generate/01-schema.sql
\i schema/generate/02-augur_data.sql
\i schema/generate/03-augur_operations.sql
\i schema/generate/04-spdx.sql
\i schema/generate/05-seed_data.sql
-- Update scripts
\i schema/generate/06-schema_update_8.sql
\i schema/generate/07-schema_update_9.sql
\i schema/generate/08-schema_update_10.sql
\i schema/generate/09-schema_update_11.sql
\i schema/generate/10-schema_update_12.sql
\i schema/generate/10-schema_update_12.sql
\i schema/generate/11-schema_update_13.sql
\i schema/generate/12-schema_update_14.sql
\i schema/generate/13-schema_update_15.sql
\i schema/generate/14-schema_update_16.sql | -- Generate core schema
\i schema/generate/01-schema.sql
\i schema/generate/02-augur_data.sql
\i schema/generate/03-augur_operations.sql
\i schema/generate/04-spdx.sql
\i schema/generate/05-seed_data.sql
-- Update scripts
\i schema/generate/06-schema_update_8.sql
\i schema/generate/07-schema_update_9.sql
\i schema/generate/08-schema_update_10.sql
\i schema/generate/09-schema_update_11.sql
\i schema/generate/10-schema_update_12.sql
\i schema/generate/10-schema_update_12.sql
\i schema/generate/11-schema_update_13.sql
\i schema/generate/12-schema_update_14.sql
\i schema/generate/13-schema_update_15.sql
\i schema/generate/14-schema_update_16.sql
\i schema/generate/15-schema_update_17.sql |
Fix the primary key uniqueness. | create type distribution as enum('stable', 'testing', 'unstable', 'experimental');
create type component as enum('main');
create table source (
name varchar(255) not null,
distribution distribution not null,
component component not null default 'main',
version varchar(255) not null,
primary key(name, distribution)
);
create table source_folder (
path varchar(255) primary key,
created timestamp not null default current_timestamp
);
| create type distribution as enum('stable', 'testing', 'unstable', 'experimental');
create type component as enum('main');
create table source (
name varchar(255) not null,
distribution distribution not null,
component component not null default 'main',
version varchar(255) not null,
primary key(name, distribution, version)
);
create table source_folder (
path varchar(255) primary key,
created timestamp not null default current_timestamp
);
|
Remove link that does not work | -- SQL to show number of records recorded earlier than tracking_start_date_time.
-- Those records should be removed.
-- https://lifewatch-inbo.cartodb.com/viz/5d42a40a-9951-11e3-8315-0ed66c7bc7f3/table
select
count(*)
from
bird_tracking as t
left join bird_tracking_devices as d
on t.device_info_serial = d.device_info_serial
where
t.date_time < d.tracking_start_date_time
| -- SQL to show number of records recorded earlier than tracking_start_date_time.
-- Those records should be removed.
select
count(*)
from
bird_tracking as t
left join bird_tracking_devices as d
on t.device_info_serial = d.device_info_serial
where
t.date_time < d.tracking_start_date_time
|
Rearrange drop tables for uninstall | DROP TABLE IF EXISTS `civicrm_sms_conversation`;
DROP TABLE IF EXISTS `civicrm_sms_conversation_question`;
DROP TABLE IF EXISTS `civicrm_sms_conversation_contact`;
DROP TABLE IF EXISTS `civicrm_sms_conversation_action`;
| DROP TABLE IF EXISTS `civicrm_sms_conversation_action`;
DROP TABLE IF EXISTS `civicrm_sms_conversation_contact`;
DROP TABLE IF EXISTS `civicrm_sms_conversation`;
DROP TABLE IF EXISTS `civicrm_sms_conversation_question`;
|
Optimize member metrics for campaigns | INSERT INTO speakeasy_petition_metrics
(campaign_id, activity, is_opt_out, npeople)
SELECT
campaign_id AS campaign_id,
status AS activity,
is_opt_out AS is_opt_out,
COUNT(*) AS npeople
FROM
civicrm_contact c
JOIN civicrm_group_contact g ON g.contact_id = c.id AND g.group_id = 42 AND c.is_deleted = 0
JOIN (SELECT
CONCAT('speakout petition ', CAST(camp.external_identifier AS CHAR(10))) COLLATE utf8_unicode_ci AS source_string,
camp.external_identifier AS speakout_id,
camp.name AS name,
camp.title AS title,
camp.id AS campaign_id
FROM
civicrm_campaign AS camp) AS kampagne ON kampagne.source_string = c.source
GROUP BY campaign_id, status, is_opt_out;
| INSERT INTO speakeasy_petition_metrics
(campaign_id, activity, is_opt_out, npeople)
SELECT
camp.id AS campaign_id,
status AS activity,
is_opt_out AS is_opt_out,
COUNT(*) AS npeople
FROM
civicrm_contact c
JOIN civicrm_group_contact g ON g.contact_id = c.id AND g.group_id = 42 AND c.is_deleted = 0
JOIN civicrm_campaign AS camp ON camp.external_identifier = substr(c.source, 19)
WHERE c.source LIKE 'speakout petition %'
GROUP BY camp.id, status, is_opt_out;
|
Update MySQL init script with more comments. | CREATE TABLE IF NOT EXISTS `KV` (
-- MySQL InnoDB indexes are limited to 767 bytes
-- You can go up to 3072 bytes if you set innodb_large_prefix
-- See: http://dev.mysql.com/doc/refman/5.5/en/innodb-restrictions.html
-- Also recommended: innodb_file_per_table
`kv_key` VARBINARY(767) NOT NULL,
`kv_value` LONGBLOB NOT NULL,
PRIMARY KEY(`kv_key`)
) ENGINE=InnoDB default charset=utf8 collate=utf8_bin
| CREATE TABLE IF NOT EXISTS `KV` (
`kv_key` VARBINARY(767) NOT NULL,
`kv_value` LONGBLOB NOT NULL,
PRIMARY KEY(`kv_key`)
) ENGINE=InnoDB default charset=utf8 collate=utf8_bin
--
-- MySQL InnoDB indexes are normally limited to 767 bytes, but
-- you can go up to 3072 bytes if you set innodb_large_prefix=true;
-- see http://dev.mysql.com/doc/refman/5.5/en/innodb-restrictions.html
--
-- For example:
--
-- CREATE TABLE IF NOT EXISTS `KV` (
-- `kv_key` VARBINARY(3072) NOT NULL,
-- `kv_value` LONGBLOB NOT NULL,
-- PRIMARY KEY(`kv_key`)
-- ) ENGINE=InnoDB DEFAULT charset=utf8 collate=utf8_bin ROW_FORMAT=DYNAMIC;
--
-- And in /etc/my.cnf:
--
-- [mysqld]
-- innodb_large_prefix = true
-- innodb_file_per_table = true
-- innodb_file_format = barracuda
--
|
Use decimal literal to avoid precision problems in TPCH Q6 | -- database: presto; groups: tpch,quarantine; tables: lineitem
SELECT sum(l_extendedprice * l_discount) AS revenue
FROM
lineitem
WHERE
l_shipdate >= DATE '1994-01-01'
AND l_shipdate < DATE '1994-01-01' + INTERVAL '1' YEAR
AND l_discount BETWEEN 0.06 - 0.01 AND 0.06 + 0.01
AND l_quantity < 24
| -- database: presto; groups: tpch,quarantine; tables: lineitem
SELECT sum(l_extendedprice * l_discount) AS revenue
FROM
lineitem
WHERE
l_shipdate >= DATE '1994-01-01'
AND l_shipdate < DATE '1994-01-01' + INTERVAL '1' YEAR
AND l_discount BETWEEN decimal '0.06' - decimal '0.01' AND decimal '0.06' + decimal '0.01'
AND l_quantity < 24
|
Add new location and code query | -- Shows codes for a game
SELECT `code_lookup`.`code`, `code_lookup`.`type`, `code_lookup`.`location_id`, `locations`.`internal_note`, `locations`.`is_start`, `locations`.`is_end` FROM `code_lookup` LEFT OUTER JOIN `locations` ON `code_lookup`.`game_id` = `locations`.`game_id` AND `code_lookup`.`location_id` = `locations`.`location_id` WHERE `code_lookup`.`game_id` = 1;
| -- Shows codes for a game
SELECT `code_lookup`.`code`, `code_lookup`.`type`, `code_lookup`.`location_id`, `locations`.`internal_note`, `locations`.`is_start`, `locations`.`is_end` FROM `code_lookup` LEFT OUTER JOIN `locations` ON `code_lookup`.`game_id` = `locations`.`game_id` AND `code_lookup`.`location_id` = `locations`.`location_id` WHERE `code_lookup`.`game_id` = 1;
-- Shows codes and clusters for locations
SELECT `locations`.`location_id`, `locations`.`internal_note`, `code_lookup`.`code`, `game_location_clusters`.`cluster_id`, `game_location_clusters`.`description` FROM `locations` LEFT OUTER JOIN `code_lookup` ON `locations`.`game_id` = `code_lookup`.`game_id` AND `locations`.`location_id` = `code_lookup`.`location_id` LEFT OUTER JOIN `game_location_clusters` ON `game_location_clusters`.`game_id` = `locations`.`game_id` AND `game_location_clusters`.`cluster_id` = `locations`.`cluster_id` WHERE `locations`.`game_id` = 1 ORDER BY `game_location_clusters`.`cluster_id` ASC, `locations`.`location_id` ASC
|
Add IF NOT EXISTS modifiers to table defs | -- Keyspace -------------------------------------------------------------------
CREATE KEYSPACE uuuurrrrllll
WITH replication = {
-- 'class': 'NetworkTopologyStrategy', 'replication_factor': '3'
'class': 'SimpleStrategy', 'replication_factor': '1'
};
CREATE TABLE uuuurrrrllll.message (
short_url text,
channel text,
nick text,
url text,
PRIMARY KEY (url, short_url)
); | -- Keyspace -------------------------------------------------------------------
CREATE KEYSPACE IF NOT EXISTS uuuurrrrllll
WITH replication = {
-- 'class': 'NetworkTopologyStrategy', 'replication_factor': '3'
'class': 'SimpleStrategy', 'replication_factor': '1'
};
CREATE TABLE IF NOT EXISTS uuuurrrrllll.message (
short_url text,
channel text,
nick text,
url text,
PRIMARY KEY (url, short_url)
); |
Create table if not exist. | begin;
create table ncaa_sr.polls (
year integer,
school_id text,
school_name text,
week text,
rank integer,
primary key (year,school_id,week)
);
copy ncaa_sr.polls from '/tmp/polls.csv' with delimiter as ',' csv header quote as '"';
commit;
| begin;
create table if not exists ncaa_sr.polls (
year integer,
school_id text,
school_name text,
week text,
rank integer,
primary key (year,school_id,week)
);
copy ncaa_sr.polls from '/tmp/polls.csv' with delimiter as ',' csv header quote as '"';
commit;
|
Add missing SVN id keyword | -- tiki_myisam.sql is run after tiki.sql if MyISAM is being installed
CREATE FULLTEXT INDEX ft ON tiki_articles(`title`, `heading`, `body`);
CREATE FULLTEXT INDEX ft ON tiki_blog_posts(`data`, `title`);
CREATE FULLTEXT INDEX ft ON tiki_blogs(`title`, `description`);
CREATE FULLTEXT INDEX ft ON tiki_calendar_items(`name`,`description`);
CREATE FULLTEXT INDEX ft ON tiki_comments(title,data);
CREATE FULLTEXT INDEX ft ON tiki_directory_sites(name,description);
CREATE FULLTEXT INDEX ft ON tiki_faq_questions(question,answer);
CREATE FULLTEXT INDEX ft ON tiki_faqs(title,description);
CREATE FULLTEXT INDEX ft ON tiki_files(name,description,search_data,filename);
CREATE FULLTEXT INDEX ft ON tiki_galleries(name,description);
CREATE FULLTEXT INDEX ft ON tiki_images(name,description);
CREATE FULLTEXT INDEX ft ON tiki_pages(`pageName`,`description`,`data`);
CREATE FULLTEXT INDEX ft ON tiki_tracker_item_fields(value);
| -- tiki_myisam.sql is run after tiki.sql if MyISAM is being installed
-- $Id$
CREATE FULLTEXT INDEX ft ON tiki_articles(`title`, `heading`, `body`);
CREATE FULLTEXT INDEX ft ON tiki_blog_posts(`data`, `title`);
CREATE FULLTEXT INDEX ft ON tiki_blogs(`title`, `description`);
CREATE FULLTEXT INDEX ft ON tiki_calendar_items(`name`,`description`);
CREATE FULLTEXT INDEX ft ON tiki_comments(title,data);
CREATE FULLTEXT INDEX ft ON tiki_directory_sites(name,description);
CREATE FULLTEXT INDEX ft ON tiki_faq_questions(question,answer);
CREATE FULLTEXT INDEX ft ON tiki_faqs(title,description);
CREATE FULLTEXT INDEX ft ON tiki_files(name,description,search_data,filename);
CREATE FULLTEXT INDEX ft ON tiki_galleries(name,description);
CREATE FULLTEXT INDEX ft ON tiki_images(name,description);
CREATE FULLTEXT INDEX ft ON tiki_pages(`pageName`,`description`,`data`);
CREATE FULLTEXT INDEX ft ON tiki_tracker_item_fields(value);
|
Add running indicies script to upgrade | -- upgrade the previous schema to the current schema
-- 1.4.3 -> 1.5.0 in this version
--
-- Copyright 2013,2014 Google Inc. All Rights Reserved.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
BEGIN;
DROP VIEW report;
DROP VIEW oldreport;
.read views-sqlite3.sql
END;
| -- upgrade the previous schema to the current schema
-- 1.4.3 -> 1.5.0 in this version
--
-- Copyright 2013,2014 Google Inc. All Rights Reserved.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
BEGIN;
DROP VIEW report;
DROP VIEW oldreport;
.read indicies-sqlite3.sql
.read views-sqlite3.sql
END;
|
Change from mediumtext to varchar |
DROP TABLE IF EXISTS `Config`;
CREATE TABLE `Config` (
`id` INTEGER NOT NULL AUTO_INCREMENT,
`Name` VARCHAR(255) NOT NULL COMMENT 'Configuration name',
`Value` VARCHAR(255) NOT NULL COMMENT 'Configuration Value',
`Description` MEDIUMTEXT(1024) NULL DEFAULT NULL COMMENT 'Configuration description',
`Config_group_id` INTEGER NOT NULL COMMENT 'Configuration group foreign key',
PRIMARY KEY (`id`)
) COMMENT 'Configuration table';
DROP TABLE IF EXISTS `Config_group`;
CREATE TABLE `Config_group` (
`id` INTEGER NOT NULL AUTO_INCREMENT,
`Name` VARCHAR(255) NOT NULL COMMENT 'Configuration group name',
`Description` MEDIUMTEXT(1024) NULL DEFAULT NULL COMMENT 'Configuration group description',
PRIMARY KEY (`id`)
); COMMENT 'Configuration Group table';
# Foreign Keys
ALTER TABLE `Config` ADD FOREIGN KEY (Config_group_id) REFERENCES `Config_group` (`id`);
# Table Properties
-- ALTER TABLE `Config` ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-- ALTER TABLE `Config_group` ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
# Data
-- INSERT INTO `Config` (`id`,`Name`,`Value`,`Description`,`Config_group_id`) VALUES
-- ('','','','','');
-- INSERT INTO `Config_group` (`id`,`Name`,`Description`) VALUES
-- ('','',''); |
DROP TABLE IF EXISTS `Config`;
CREATE TABLE `Config` (
`id` INTEGER NOT NULL AUTO_INCREMENT,
`Name` VARCHAR (64) NOT NULL COMMENT 'Configuration name',
`Value` VARCHAR(255) NOT NULL COMMENT 'Configuration Value',
`Description` VARCHAR(255) NULL DEFAULT NULL COMMENT 'Configuration description',
`Config_group_id` INTEGER NOT NULL COMMENT 'Configuration group foreign key',
PRIMARY KEY (`id`)
) COMMENT 'Configuration table';
DROP TABLE IF EXISTS `Config_group`;
CREATE TABLE `Config_group` (
`id` INTEGER NOT NULL AUTO_INCREMENT,
`Name` VARCHAR(64) NOT NULL COMMENT 'Configuration group name',
`Description` VARCHAR(255) NULL DEFAULT NULL COMMENT 'Configuration group description',
PRIMARY KEY (`id`)
); COMMENT 'Configuration Group table';
# Foreign Keys
ALTER TABLE `Config` ADD FOREIGN KEY (Config_group_id) REFERENCES `Config_group` (`id`);
# Table Properties
-- ALTER TABLE `Config` ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-- ALTER TABLE `Config_group` ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
# Data
-- INSERT INTO `Config` (`id`,`Name`,`Value`,`Description`,`Config_group_id`) VALUES
-- ('','','','','');
-- INSERT INTO `Config_group` (`id`,`Name`,`Description`) VALUES
-- ('','',''); |
Fix SQL to create the entity_type and sequence tables | /* File generated automatically by dynamo */
/* Entity types */
CREATE TABLE entity_type (
/* the entity type identifier */
"id" INTEGER AUTO_INCREMENT,
/* the entity type name (table name) */
"name" VARCHAR(127) UNIQUE NOT NULL,
PRIMARY KEY ("id")
);
/* Sequence generator */
CREATE TABLE sequence (
/* the sequence name */
"name" VARCHAR(127) NOT NULL,
/* the sequence record version */
"version" int ,
/* the sequence value */
"value" BIGINT ,
/* the sequence block size */
"block_size BIGINT ,
PRIMARY KEY ("name")
);
INSERT INTO entity_type (name) VALUES
("entity_type")
,("sequence")
;
| /* File generated automatically by dynamo */
/* Entity types */
CREATE TABLE entity_type (
/* the entity type identifier */
"id" SERIAL,
/* the entity type name (table name) */
"name" VARCHAR(127) UNIQUE NOT NULL,
PRIMARY KEY ("id")
);
/* Sequence generator */
CREATE TABLE sequence (
/* the sequence name */
"name" VARCHAR(127) NOT NULL,
/* the sequence record version */
"version" int ,
/* the sequence value */
"value" BIGINT ,
/* the sequence block size */
"block_size" BIGINT ,
PRIMARY KEY ("name")
);
INSERT INTO entity_type (name) VALUES
('entity_type')
,('sequence')
;
|
Fix the script for inserting divisions | insert into divisions values 'PACIFIC', 'CENTRAL', 'METROPOLITAN', 'ATLANTIC'; | insert into divisions(name) values ('PACIFIC'), ('CENTRAL'), ('METROPOLITAN'), ('ATLANTIC'); |
Add author timestamp field to hash table | -- git_hash table
--
-- Stores ONLY metadata about commits that MediaWiki CANNOT store.
--
-- The other tables git_status_modify_hash and git_edit_hash store
-- actual CHANGES of a commit that MediaWiki cannot store or cannot
-- store in a suitable format.
CREATE TABLE IF NOT EXISTS /*_*/git_hash(
-- The primary key, contains the Git commit hash in a 40-character
-- hex representation of SHA-1
commit_hash VARBINARY(40) NOT NULL PRIMARY KEY,
-- Parent commit hashes (up to 15, separated by commas) in 40-char
-- hex of SHA-1
commit_hash_parents VARBINARY(615),
-- Email addresses and usernames can be changed in MediaWiki,
-- however this shouldn't change every previous commit (since that would
-- change the hashes). Also necessary for edits made via pull requests.
author_name VARBINARY(255),
author_email VARBINARY(255),
author_tzOffset VARBINARY(5),
-- With rebases sometimes you have different authors and committers. This
-- has to be stored somehow to keep the "real" Git repository in sync.
committer_name VARBINARY(255),
committer_email VARBINARY(255),
committer_timestamp VARBINARY(14),
committer_tzOffset VARBINARY(5)
) /*$wgDBTableOptions*/;
| -- git_hash table
--
-- Stores ONLY metadata about commits that MediaWiki CANNOT store.
--
-- The other tables git_status_modify_hash and git_edit_hash store
-- actual CHANGES of a commit that MediaWiki cannot store or cannot
-- store in a suitable format.
CREATE TABLE IF NOT EXISTS /*_*/git_hash(
-- The primary key, contains the Git commit hash in a 40-character
-- hex representation of SHA-1
commit_hash VARBINARY(40) NOT NULL PRIMARY KEY,
-- Parent commit hashes (up to 15, separated by commas) in 40-char
-- hex of SHA-1
commit_hash_parents VARBINARY(615),
-- Email addresses and usernames can be changed in MediaWiki,
-- however this shouldn't change every previous commit (since that would
-- change the hashes). Also necessary for edits made via pull requests.
author_name VARBINARY(255),
author_email VARBINARY(255),
author_timestamp VARBINARY(14),
author_tzOffset VARBINARY(5),
-- With rebases sometimes you have different authors and committers. This
-- has to be stored somehow to keep the "real" Git repository in sync.
committer_name VARBINARY(255),
committer_email VARBINARY(255),
committer_timestamp VARBINARY(14),
committer_tzOffset VARBINARY(5)
) /*$wgDBTableOptions*/;
|
Fix users_merits table to include timestamps. | CREATE TABLE users_merits (user_id INT, merit_id INT, created_at DATE);
| CREATE TABLE users_merits (
user_id INT,
merit_id INT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
|
Add project avail code n to animal billable days report | PARAMETERS
(START_DATE TIMESTAMP, END_DATE TIMESTAMP)
SELECT id
, GROUP_CONCAT(project, ', ') as Projects
, GROUP_CONCAT(date, ', ') as AssignmentDates
, GROUP_CONCAT(enddate, ', ') as ReleaseDates
, GROUP_CONCAT(project.avail, ', ') as ProjectAvails
FROM study.assignment
WHERE date <= END_DATE
AND (enddate >= START_DATE OR enddate IS NULL)
AND (project.avail = 'r' OR project.avail = 'rr')
GROUP BY id; | PARAMETERS
(START_DATE TIMESTAMP, END_DATE TIMESTAMP)
SELECT id
, GROUP_CONCAT(project, ', ') as Projects
, GROUP_CONCAT(date, ', ') as AssignmentDates
, GROUP_CONCAT(enddate, ', ') as ReleaseDates
, GROUP_CONCAT(project.avail, ', ') as ProjectAvails
FROM study.assignment
WHERE date <= END_DATE
AND (enddate >= START_DATE OR enddate IS NULL)
AND (project.avail = 'r' OR project.avail = 'rr' OR project.avail = 'n')
GROUP BY id;
|
Remove unnecessary foreign key constraint. | ALTER TABLE REPOSITORY ADD COLUMN ORIGIN_USER_NAME VARCHAR(100);
ALTER TABLE REPOSITORY ADD COLUMN ORIGIN_REPOSITORY_NAME VARCHAR(100);
ALTER TABLE REPOSITORY ADD COLUMN PARENT_USER_NAME VARCHAR(100);
ALTER TABLE REPOSITORY ADD COLUMN PARENT_REPOSITORY_NAME VARCHAR(100);
CREATE TABLE PULL_REQUEST(
USER_NAME VARCHAR(100) NOT NULL,
REPOSITORY_NAME VARCHAR(100) NOT NULL,
ISSUE_ID INT NOT NULL,
BRANCH VARCHAR(100) NOT NULL,
REQUEST_USER_NAME VARCHAR(100) NOT NULL,
REQUEST_REPOSITORY_NAME VARCHAR(100) NOT NULL,
REQUEST_BRANCH VARCHAR(100) NOT NULL,
COMMIT_ID_FROM VARCHAR(40) NOT NULL,
COMMIT_ID_TO VARCHAR(40) NOT NULL
);
ALTER TABLE PULL_REQUEST ADD CONSTRAINT IDX_PULL_REQUEST_PK PRIMARY KEY (USER_NAME, REPOSITORY_NAME, ISSUE_ID);
ALTER TABLE PULL_REQUEST ADD CONSTRAINT IDX_PULL_REQUEST_FK0 FOREIGN KEY (USER_NAME, REPOSITORY_NAME, ISSUE_ID) REFERENCES ISSUE (USER_NAME, REPOSITORY_NAME, ISSUE_ID);
--ALTER TABLE PULL_REQUEST ADD CONSTRAINT IDX_PULL_REQUEST_FK1 FOREIGN KEY (REQUEST_USER_NAME, REQUEST_REPOSITORY_NAME) REFERENCES REPOSITORY (USER_NAME, REPOSITORY_NAME);
ALTER TABLE ISSUE ADD COLUMN PULL_REQUEST BOOLEAN NOT NULL DEFAULT FALSE;
| ALTER TABLE REPOSITORY ADD COLUMN ORIGIN_USER_NAME VARCHAR(100);
ALTER TABLE REPOSITORY ADD COLUMN ORIGIN_REPOSITORY_NAME VARCHAR(100);
ALTER TABLE REPOSITORY ADD COLUMN PARENT_USER_NAME VARCHAR(100);
ALTER TABLE REPOSITORY ADD COLUMN PARENT_REPOSITORY_NAME VARCHAR(100);
CREATE TABLE PULL_REQUEST(
USER_NAME VARCHAR(100) NOT NULL,
REPOSITORY_NAME VARCHAR(100) NOT NULL,
ISSUE_ID INT NOT NULL,
BRANCH VARCHAR(100) NOT NULL,
REQUEST_USER_NAME VARCHAR(100) NOT NULL,
REQUEST_REPOSITORY_NAME VARCHAR(100) NOT NULL,
REQUEST_BRANCH VARCHAR(100) NOT NULL,
COMMIT_ID_FROM VARCHAR(40) NOT NULL,
COMMIT_ID_TO VARCHAR(40) NOT NULL
);
ALTER TABLE PULL_REQUEST ADD CONSTRAINT IDX_PULL_REQUEST_PK PRIMARY KEY (USER_NAME, REPOSITORY_NAME, ISSUE_ID);
ALTER TABLE PULL_REQUEST ADD CONSTRAINT IDX_PULL_REQUEST_FK0 FOREIGN KEY (USER_NAME, REPOSITORY_NAME, ISSUE_ID) REFERENCES ISSUE (USER_NAME, REPOSITORY_NAME, ISSUE_ID);
ALTER TABLE ISSUE ADD COLUMN PULL_REQUEST BOOLEAN NOT NULL DEFAULT FALSE;
|
Update sql query for removing recipe follow | DELETE FROM
followers_users_recipes
WHERE
user_id = ${user_id}
AND recipe_id = ${recipe_id}
RETURNING *;
| DELETE FROM
followers_users_recipes
WHERE
user_id = ${userID}
AND recipe_id = ${targetID}
RETURNING *;
|
Create indexes for tables with a FK to job.id | CREATE INDEX log_job_id on log (job_id);
CREATE INDEX performance_job_id on performance (job_id);
CREATE INDEX output_job_id on output (oq_job_id);
| |
Add script to update terms and conditions table with reference to new template | -- IFS-3980
-- New version of site-wide terms and conditions to include GDPR notice
SET @system_maintenance_user_id =
(SELECT id FROM user WHERE email = 'ifs_system_maintenance_user@innovateuk.org');
INSERT INTO terms_and_conditions (name, template, version, type, created_by, created_on, modified_on, modified_by)
VALUES ("Site Terms and Conditions", "terms-and-conditions-v2", 2, "SITE", @system_maintenance_user_id, NOW(), NOW(), @system_maintenance_user_id); | |
Add a function for creating a report of traffic from the database nicely consolated for us already | CREATE OR REPLACE FUNCTION snort.report_traffic_for_site_within_timeperiod(_site_id bigint, _seconds bigint)
RETURNS json
LANGUAGE plpgsql SECURITY DEFINER
AS $$
DECLARE
traffic_report_cursor cursor (query_site_id bigint, interval_seconds bigint) FOR
SELECT nsip_src.ip_address AS src_ip, nsip_dst.ip_address AS dst_ip, sum(txpackets) AS txpackets, sum(rxpackets) AS rxpackets FROM snort.traffic_reports AS sntr
LEFT JOIN recorder_messages AS rm ON (rm.id=msg_id)
LEFT JOIN recorders AS r ON (r.id=rm.recorder_id)
LEFT JOIN sites AS s ON (s.id=r.site_id)
LEFT JOIN network_scan.ip_addresses AS nsip_src ON (nsip_src.id=src)
LEFT JOIN network_scan.ip_addresses AS nsip_dst ON (nsip_dst.id=dst)
WHERE s.id=query_site_id
AND rm.generated_at >= current_timestamp - (interval_seconds || ' seconds')::interval
GROUP BY nsip_src.ip_address, nsip_dst.ip_address;
traffic_record record;
consolated_traffic json[];
traffic_json json;
BEGIN
-- open the cursor and start doing work on it
OPEN traffic_report_cursor(query_site_id := _site_id, interval_seconds := _seconds);
LOOP
-- Build the JSON object for each record
FETCH traffic_report_cursor INTO traffic_record;
EXIT WHEN NOT FOUND;
traffic_json := json_build_object(
'src', traffic_record.src_ip,
'dst', traffic_record.dst_ip,
'rxpackets', traffic_record.rxpackets,
'txpackets', traffic_record.txpackets
);
consolated_traffic := array_append(
consolated_traffic, traffic_json
);
END LOOP;
-- Now return the traffic information
traffic_json := json_build_object(
'consolated_traffic', consolated_traffic
);
RETURN traffic_json;
END
$$;
| |
Fix Portal/Teleport to Tol Barad | -- Fix Mage's Tol barad Portals and Teleports
UPDATE gameobject_template SET data0=88341 WHERE entry=206615; -- ally portal
UPDATE gameobject_template SET data0=88339 WHERE entry=206616; -- horde portal
DELETE FROM spell_target_position WHERE id IN (88342, 88344);
INSERT INTO spell_target_position VALUES
(88342, 732, -334.60, 1043.80, 21.900, 1.5), -- ally teleport
(88344, 732, -601.40, 1382.03, 21.900, 1.5); -- horde teleport | |
Update lukukausimaksukokeilu to lukukausimaksu (and translations) | USE [VipunenTK_lisatiedot]
GO
IF EXISTS (SELECT * FROM sys.objects WHERE object_id = OBJECT_ID(N'[dbo].[rahoituslahde]') AND type in (N'U'))
update [dbo].[rahoituslahde]
set [rahoituslahde] = 'Lukukausimaksu' --old Lukukausimaksukokeilu
,[rahoituslahde_SV] = 'Terminsavgift' --old Experiment med terminsavgift
,[rahoituslahde_EN] = 'Tuition fee' --old Tuition fee trial period
,kommentti = 'CSC Teemu' --old CSC Jarmo
where rahoituslahde_koodi = '5'
GO
USE ANTERO
GO | |
Fix errors caused by c81c0b3. | # --- !Ups
create table issue_event (
id bigint not null,
created timestamp,
sender_login_id varchar(255),
issue_id bigint,
event_type varchar(26),
old_value varchar(255),
new_value varchar(255),
constraint ck_issue_event_event_type check (event_type in ('NEW_ISSUE','NEW_POSTING','ISSUE_ASSIGNEE_CHANGED','ISSUE_STATE_CHANGED','NEW_COMMENT','NEW_PULL_REQUEST','NEW_SIMPLE_COMMENT','PULL_REQUEST_STATE_CHANGED')),
constraint pk_issue_event primary key (id))
;
create sequence issue_event_seq;
alter table issue_event add constraint fk_issue_event_issue_8 foreign key (issue_id) references issue (id) on delete restrict on update restrict;
create index ix_issue_event_issue_8 on issue_event (issue_id);
# --- !Downs
drop table if exists issue_event;
drop sequence if exists issue_event_seq;
| |
Include seed data for testing MySQL | USE uncovery;
INSERT INTO users (token) VALUES ('Archon');
INSERT INTO users (token) VALUES ('Carrier');
INSERT INTO users (token) VALUES ('Colossus');
INSERT INTO users (token) VALUES ('Dark Templar');
INSERT INTO users (token) VALUES ('Zealot');
INSERT INTO users (token) VALUES ('SCV');
INSERT INTO users (token) VALUES ('Raven');
INSERT INTO users (token) VALUES ('Marine');
INSERT INTO users (token) VALUES ('Ghost');
INSERT INTO users (token) VALUES ('Drone');
INSERT INTO users (token) VALUES ('Zergling');
INSERT INTO users (token) VALUES ('Broodling');
INSERT INTO messages (userToken, messageString) VALUES ('Zealot', 'State your will.');
INSERT INTO messages (userToken, messageString) VALUES ('Zealot', 'For Aiur!');
INSERT INTO messages (userToken, messageString) VALUES ('Dark Templar', 'Nach nagalas.');
INSERT INTO messages (userToken, messageString) VALUES ('SCV', 'SCV good to go, sir.');
INSERT INTO messages (userToken, messageString) VALUES ('Marine', 'You wanna piece of me, boy?');
INSERT INTO messages (userToken, messageString) VALUES ('Marine', 'Gimme something to shoot!');
INSERT INTO votes (userToken, messageId) VALUES ('Zealot', 1);
INSERT INTO votes (userToken, messageId) VALUES ('Zealot', 2);
INSERT INTO votes (userToken, messageId) VALUES ('Zealot', 3);
INSERT INTO votes (userToken, messageId) VALUES ('Marine', 1);
INSERT INTO votes (userToken, messageId) VALUES ('Ghost', 1);
| |
Add SQL file to create required tables | CREATE TABLE IF NOT EXISTS `Lectura` (
`ID` int(11) NOT NULL AUTO_INCREMENT,
`data` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`trasto_id` char(64) CHARACTER SET latin1 COLLATE latin1_spanish_ci NOT NULL,
`intensitat` float DEFAULT NULL,
`tensio` float DEFAULT NULL,
`temperatura1` float DEFAULT NULL,
`temperatura2` float DEFAULT NULL,
PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=0 ;
-- --------------------------------------------------------
CREATE TABLE IF NOT EXISTS `Recolector` (
`ID` char(32) NOT NULL,
`data_vist` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-- --------------------------------------------------------
CREATE TABLE IF NOT EXISTS `Trasto` (
`ID` char(64) CHARACTER SET latin1 COLLATE latin1_spanish_ci NOT NULL,
`nom` text CHARACTER SET latin1 COLLATE latin1_spanish_ci,
`recolector_id` char(32) NOT NULL,
`tipus` text CHARACTER SET latin1 COLLATE latin1_spanish_ci NOT NULL,
`data_creacio` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
| |
Add sql solution for 626. Exchange Seats | SELECT IF(`id` = (SELECT MAX(`id`) FROM `seat`) AND (`id` & 1), `id`, IF(`id` & 1, `id` + 1, `id` - 1)) AS `id`, `student` FROM `seat` ORDER BY `id`
| |
Add missing comma in SQL migration. | CREATE TABLE `wiki_documentlink` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`linked_from_id` integer NOT NULL,
`linked_to_id` integer NOT NULL,
-- BUG (the defect this row's fix targets): a comma is missing after the
-- `kind` column definition, so the table-level UNIQUE clause fails to parse.
`kind` varchar(16) NOT NULL
UNIQUE (`linked_from_id`, `linked_to_id`)
) ENGINE=InnoDB CHARACTER SET utf8 COLLATE utf8_general_ci;
-- Self-referential link table: both FKs point at wiki_document(id).
ALTER TABLE `wiki_documentlink`
ADD CONSTRAINT `linked_from_id_refs_id_8da923a9`
FOREIGN KEY (`linked_from_id`)
REFERENCES `wiki_document` (`id`);
ALTER TABLE `wiki_documentlink`
ADD CONSTRAINT `linked_to_id_refs_id_8da923a9`
FOREIGN KEY (`linked_to_id`)
REFERENCES `wiki_document` (`id`);
| CREATE TABLE `wiki_documentlink` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`linked_to_id` integer NOT NULL,
`linked_from_id` integer NOT NULL,
-- Fixed: comma added after `kind` so the UNIQUE constraint below parses.
`kind` varchar(16) NOT NULL,
UNIQUE (`linked_from_id`, `linked_to_id`)
) ENGINE=InnoDB CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE `wiki_documentlink`
ADD CONSTRAINT `linked_from_id_refs_id_8da923a9`
FOREIGN KEY (`linked_from_id`)
REFERENCES `wiki_document` (`id`);
ALTER TABLE `wiki_documentlink`
ADD CONSTRAINT `linked_to_id_refs_id_8da923a9`
FOREIGN KEY (`linked_to_id`)
REFERENCES `wiki_document` (`id`);
|
Add SQL for selecting all blog posts. | -- Fetch every blog post with an explicit column list (no SELECT *).
SELECT post_id,
       title,
       body,
       category,
       created,
       created_by
FROM blog_post;
| |
Make users have secret keys, not the user/app relationship. | # ---!Ups
-- Move the secret key from the user/application join table onto the user itself.
alter table application_users drop column secret_key;
alter table users add column secret_key varchar(36) not null;
drop index application_users_idx;
-- Lookups now go by (username, secret_key) instead of (user_id, secret_key).
create index users_secret_key_idx on users (username, secret_key);
# ---!Downs
-- NOTE(review): re-adding secret_key as NOT NULL with no default may fail on a
-- non-empty application_users table depending on the engine's strict mode — confirm.
alter table application_users add column secret_key varchar(36) not null;
alter table users drop column secret_key;
create index application_users_idx on application_users (user_id, secret_key);
| |
Fix bad upgrade014_015 ALTER TABLE command | -- Upgrade: schema_version 14 to 15
--
ALTER TABLE patch_comments ADD parent_uuid VARCHAR(40);
-- Per-account record of which files of a patch set have been reviewed.
CREATE TABLE account_patch_reviews
(account_id INTEGER NOT NULL DEFAULT(0),
change_id INTEGER NOT NULL DEFAULT(0),
patch_set_id INTEGER NOT NULL DEFAULT(0),
file_name VARCHAR(255) NOT NULL DEFAULT(''),
PRIMARY KEY (account_id, change_id, patch_set_id, file_name)
);
-- BUG (the defect this row's fix targets): "ALTER_TABLE" below is not valid SQL.
ALTER_TABLE account_patch_reviews OWNER TO gerrit2;
UPDATE schema_version SET version_nbr = 15;
| -- Upgrade: schema_version 14 to 15
--
ALTER TABLE patch_comments ADD parent_uuid VARCHAR(40);
CREATE TABLE account_patch_reviews
(account_id INTEGER NOT NULL DEFAULT(0),
change_id INTEGER NOT NULL DEFAULT(0),
patch_set_id INTEGER NOT NULL DEFAULT(0),
file_name VARCHAR(255) NOT NULL DEFAULT(''),
PRIMARY KEY (account_id, change_id, patch_set_id, file_name)
);
-- Fixed: "ALTER_TABLE" corrected to "ALTER TABLE".
ALTER TABLE account_patch_reviews OWNER TO gerrit2;
UPDATE schema_version SET version_nbr = 15;
|
Migrate db partition incomes by user_id |
-- +goose Up
-- SQL in section 'Up' is executed when this migration is applied
-- Widen the PK to (id, user_id) so the table can be hash-partitioned on user_id
-- (every partitioning column must be part of every unique key in MySQL).
-- MySQL does not allow a PARTITION BY clause to be combined with other alter
-- specifications in a single ALTER TABLE, so the change is split in two.
ALTER TABLE `incomes` DROP PRIMARY KEY, ADD PRIMARY KEY (`id`, `user_id`);
ALTER TABLE `incomes` PARTITION BY HASH(`user_id`) PARTITIONS 4096;
-- +goose Down
-- SQL section 'Down' is executed when this migration is rolled back
-- NOTE: only the partitioning is reverted; the composite PK is kept (as before).
ALTER TABLE `incomes` REMOVE PARTITIONING;
| |
Add active field to manuscript table | -- Author: dan.leehr@nescent.org
--
-- Organizations
CREATE SEQUENCE organization_seq;
CREATE TABLE organization
(
organization_id INTEGER PRIMARY KEY not null default nextval('organization_seq'),
code VARCHAR(32) not null,
name VARCHAR(255) not null
);
CREATE UNIQUE INDEX org_code_idx on organization(code);
-- Manuscripts
-- NOTE: this is the "before" schema; the paired fixed version adds an
-- `active` boolean column to this table.
CREATE SEQUENCE manuscript_seq;
CREATE TABLE manuscript
(
manuscript_id INTEGER PRIMARY KEY not null default nextval('manuscript_seq'),
organization_id INTEGER not null REFERENCES organization(organization_id),
msid VARCHAR(255) not null,
version INTEGER not null default 1,
json_data text
);
CREATE UNIQUE INDEX manuscript_msid_ver_idx on manuscript(msid, version);
CREATE INDEX manuscript_msid_idx ON manuscript(msid);
-- OAuth2 Tokens
CREATE SEQUENCE oauth_token_seq;
CREATE TABLE oauth_token
(
oauth_token_id INTEGER PRIMARY KEY not null default nextval('oauth_token_seq'),
eperson_id INTEGER not null REFERENCES eperson(eperson_id),
token VARCHAR(32) not null,
expires DATE
);
-- API users and permissions
-- should these just be tied to dspace resource policy? | -- Author: dan.leehr@nescent.org
--
-- Organizations
CREATE SEQUENCE organization_seq;
CREATE TABLE organization
(
organization_id INTEGER PRIMARY KEY not null default nextval('organization_seq'),
code VARCHAR(32) not null,
name VARCHAR(255) not null
);
CREATE UNIQUE INDEX org_code_idx on organization(code);
-- Manuscripts
CREATE SEQUENCE manuscript_seq;
CREATE TABLE manuscript
(
manuscript_id INTEGER PRIMARY KEY not null default nextval('manuscript_seq'),
organization_id INTEGER not null REFERENCES organization(organization_id),
msid VARCHAR(255) not null,
version INTEGER not null default 1,
-- New column added by this change; existing rows get TRUE via the default.
active BOOLEAN not null default TRUE,
json_data text
);
CREATE UNIQUE INDEX manuscript_msid_ver_idx on manuscript(msid, version);
CREATE INDEX manuscript_msid_idx ON manuscript(msid);
-- OAuth2 Tokens
CREATE SEQUENCE oauth_token_seq;
CREATE TABLE oauth_token
(
oauth_token_id INTEGER PRIMARY KEY not null default nextval('oauth_token_seq'),
eperson_id INTEGER not null REFERENCES eperson(eperson_id),
token VARCHAR(32) not null,
expires DATE
);
-- API users and permissions
-- should these just be tied to dspace resource policy? |
Add script showing cyclic parents in Nominatim | -- Places caught in a 2-cycle: the row's parent points straight back at it.
SELECT child.place_id,
       child.name,
       child.parent_place_id,
       child.linked_place_id,
       child.osm_id,
       child.osm_type,
       child.indexed_status
FROM placex AS child
JOIN placex AS parent ON parent.place_id = child.parent_place_id
WHERE parent.parent_place_id = child.place_id;
| |
Add myplaces3 to ol4 appsetup |
-- Rewrite the mapfull bundle's startup config (Import-Bundle list) for the
-- OL3 geoportal view; the JSON below is stored verbatim as the startup column.
UPDATE portti_view_bundle_seq SET startup = '{
"metadata": {
"Import-Bundle": {
"maparcgis": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"mapmodule": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"oskariui": {
"bundlePath": "/Oskari/packages/framework/bundle/"
},
"mapwfs2": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"mapstats": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"mapuserlayers": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"ui-components": {
"bundlePath": "/Oskari/packages/framework/bundle/"
},
"mapanalysis": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"mapfull": {
"bundlePath": "/Oskari/packages/framework/bundle/"
},
"mapwmts": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
},
"mapmyplaces": {
"bundlePath": "/Oskari/packages/mapping/ol3/"
}
}
},
"bundlename": "mapfull"
}'
where view_id = (select id from portti_view where name = 'Geoportal OL3')
AND bundle_id = (select id from portti_bundle where name = 'mapfull');
-- Register the myplaces3 bundle itself.
INSERT INTO portti_bundle(name,startup) VALUES (
'myplaces3',
'{
"bundlename" : "myplaces3",
"metadata" : {
"Import-Bundle" : {
"myplaces3" : {
"bundlePath" : "/Oskari/packages/framework/bundle/"
}
}
}
}'
);
-- Attach myplaces3 to the OL3 geoportal view at sequence slot 190.
INSERT INTO portti_view_bundle_seq (view_id, bundle_id, seqno, startup) VALUES (
(select id from portti_view where name = 'Geoportal OL3'),
(select id from portti_bundle where name = 'myplaces3'),
190,
'{
"bundlename" : "myplaces3",
"metadata" : {
"Import-Bundle" : {
"myplaces3" : {
"bundlePath" : "/Oskari/packages/framework/bundle/"
}
}
}
}'
); | |
Add description to administrator role in bootstrap data | UPDATE referencedata.roles
-- The hard-coded UUID below is the administrator role's bootstrap id.
SET description = 'System administrator.'
WHERE id = 'a439c5de-b8aa-11e6-80f5-76304dec7eb7'; | |
Add extra attributes to the attribute table. | -- Copyright [1999-2016] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
# patch_84_85_b.sql
#
# Title: Add extra attributes to the attribute table.
#
# Description:
# New attributes to be included in this release.
-- the gene_tree_root_attr table (note: an earlier comment said genome_db,
-- but the ALTER below clearly targets gene_tree_root_attr)
ALTER TABLE gene_tree_root_attr
ADD COLUMN lca int(10) unsigned NOT NULL,
ADD COLUMN taxonomic_coverage FLOAT(5) NOT NULL,
ADD COLUMN ratio_species_genes FLOAT(5) NOT NULL;
# Patch identifier
INSERT INTO meta (species_id, meta_key, meta_value)
VALUES (NULL, 'patch', 'patch_84_85_b.sql|add_attributes');
| |
Add file of data base,Table User will be created and some users will be implemented | -- Users table (MySQL). Fixes: parentheses instead of braces, VARCHAR lengths
-- in parentheses, and a PRIMARY KEY so the AUTO_INCREMENT column is valid
-- (MySQL requires an AUTO_INCREMENT column to be indexed).
-- NOTE(review): passwords are stored in plain text here — they should be hashed.
CREATE TABLE Utilisateur (
IdUser int NOT NULL AUTO_INCREMENT,
Username VARCHAR(100),
Password VARCHAR(100),
Email VARCHAR(100),
PRIMARY KEY (IdUser)
);
-- Column lists named explicitly so AUTO_INCREMENT fills IdUser; the original
-- 3-value INSERTs into a 4-column table would fail.
INSERT INTO Utilisateur (Username, Password, Email) VALUES ('anthony','anthony','anthony@barei.fr');
INSERT INTO Utilisateur (Username, Password, Email) VALUES ('elliezar','elliezar','elliezar@rayray.fr');
-- NOTE(review): the remaining INSERT below this block still omits its column
-- list and needs the same (Username, Password, Email) treatment.
INSERT INTO Utilisateur VALUES ('qixin','qixin','qixin@ying.fr'); | |
Add trigger to create borrower debarment | -- Koha/MySQL: when an item transitions into lost-status 12, automatically
-- debar every borrower who currently has that item on loan, unless that
-- borrower already has the same debarment.
DROP TRIGGER IF EXISTS autoKemnerSperring;
delimiter //
CREATE TRIGGER autoKemnerSperring BEFORE UPDATE ON items
FOR EACH ROW
BEGIN
-- Fire only on the transition *into* status 12 (OLD may be NULL the first time).
IF NEW.itemlost = 12 AND (OLD.itemlost != 12 OR OLD.itemlost IS NULL) THEN
INSERT INTO borrower_debarments (borrowernumber, type, comment, manager_id)
(SELECT borrowernumber, 'MANUAL', 'Sendt til kemner',49393 FROM issues
WHERE issues.itemnumber=NEW.itemnumber AND NOT EXISTS (
-- FIX: both sides of the comparison are now qualified. The original compared
-- borrower_debarments.borrowernumber to itself (always true for non-NULL),
-- so one matching debarment for ANY borrower suppressed all further inserts.
SELECT * FROM borrower_debarments bd
WHERE bd.borrowernumber=issues.borrowernumber AND bd.comment='Sendt til kemner'));
END IF;
END;//
delimiter ;
| |
Add script to update database schema | -- create procedure
-- updateMSE: for every AUC row, copy its result into the matching MSE row
-- (same test case, recorded within the minute before the AUC row), so AUC
-- and MSE can be collapsed into one row with two metric columns.
DELIMITER $$
DROP PROCEDURE IF EXISTS updateMSE$$
CREATE PROCEDURE updateMSE()
BEGIN
DECLARE done INT DEFAULT FALSE;
DECLARE p_testcase_id VARCHAR(125);
DECLARE p_auc_result VARCHAR(125);
DECLARE p_auc_time DATETIME;
-- Cursor column order is (test_case_id, date, result); the FETCH below
-- relies on that order.
DECLARE cur1 CURSOR FOR
SELECT test_case_id,date,result
FROM h2o.TestNG
WHERE metric_type = 'AUC';
DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;
OPEN cur1;
-- Safe-updates is disabled because the UPDATE below has no key column in
-- its WHERE clause; it is restored after the loop.
SET SQL_SAFE_UPDATES=0;
read_loop: LOOP
FETCH cur1 INTO p_testcase_id, p_auc_time, p_auc_result;
IF done THEN
LEAVE read_loop;
END IF;
UPDATE h2o.TestNG
SET auc_result = p_auc_result
WHERE metric_type = 'MSE'
AND test_case_id = p_testcase_id
-- AND TIMESTAMP(date) BETWEEN TIMESTAMP(p_auc_time) AND DATE_ADD(TIMESTAMP(p_auc_time),INTERVAL -1 MINUTE)
AND TIMESTAMP(date) <= TIMESTAMP(p_auc_time)
AND TIMESTAMP(date) >= DATE_ADD(TIMESTAMP(p_auc_time),INTERVAL -1 MINUTE)
-- AND date = p_auc_time
;
END LOOP;
SET SQL_SAFE_UPDATES=1;
CLOSE cur1;
END $$
DELIMITER ;
-- create back up table
-- Full copy of TestNG taken before the destructive schema migration below.
CREATE TABLE IF NOT EXISTS TestNG_backup
(
test_case_id VARCHAR(125),
training_frame_id VARCHAR(125),
validation_frame_id VARCHAR(125),
metric_type VARCHAR(125),
result VARCHAR(125),
date datetime,
interpreter_version VARCHAR(125),
machine_name VARCHAR(125),
total_hosts INT,
cpus_per_hosts INT,
total_nodes INT,
source VARCHAR(125),
parameter_list VARCHAR(1024),
git_hash_number VARCHAR(125),
tuned_or_defaults VARCHAR(125)
);
-- back up data
INSERT INTO TestNG_backup
SELECT * FROM TestNG;
-- add auc_result column in TestNG table
ALTER TABLE TestNG ADD COLUMN auc_result DOUBLE AFTER validation_frame_id;
-- update auc result
CALL updateMSE();
-- add mse_column in TestNG table
ALTER TABLE TestNG ADD COLUMN mse_result DOUBLE AFTER validation_frame_id;
SET SQL_SAFE_UPDATES=0;
-- update mse result
-- Intentionally unfiltered: after the AUC rows are copied over, every
-- remaining row's result is its MSE value.
UPDATE TestNG
SET mse_result = result
;
-- delete the auc row
DELETE FROM TestNG
WHERE metric_type = 'AUC';
SET SQL_SAFE_UPDATES=1;
-- drop metric_type and result column
ALTER TABLE TestNG DROP COLUMN result;
ALTER TABLE TestNG DROP COLUMN metric_type;
-- finish
-- Final sanity-check dump of the migrated table.
SELECT * FROM TestNG;
| |
Add Kev Riley VLF count script | /*
https://gallery.technet.microsoft.com/scriptcenter/SQL-Script-to-list-VLF-e6315249
Kev Riley
T-SQL: reports the number of virtual log files (VLFs) per online database
by running DBCC LOGINFO against each one and counting the rows returned.
*/
--variables to hold each 'iteration'
declare @query varchar(100)
declare @dbname sysname
declare @vlfs int
--table variable used to 'loop' over databases
declare @databases table (dbname sysname)
insert into @databases
--only choose online databases
select name from sys.databases where state = 0
--table variable to hold results
declare @vlfcounts table
(dbname sysname,
vlfcount int)
--table variable to capture DBCC loginfo output
--changes in the output of DBCC loginfo from SQL2012 mean we have to determine the version
declare @MajorVersion tinyint
set @MajorVersion = LEFT(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar(max)),CHARINDEX('.',CAST(SERVERPROPERTY('ProductVersion') AS nvarchar(max)))-1)
if @MajorVersion < 11 -- pre-SQL2012
begin
declare @dbccloginfo table
(
fileid smallint,
file_size bigint,
start_offset bigint,
fseqno int,
[status] tinyint,
parity tinyint,
create_lsn numeric(25,0)
)
while exists(select top 1 dbname from @databases)
begin
set @dbname = (select top 1 dbname from @databases)
set @query = 'dbcc loginfo (' + '''' + @dbname + ''') '
insert into @dbccloginfo
exec (@query)
-- @@rowcount of the INSERT..EXEC equals the number of VLFs in that log.
set @vlfs = @@rowcount
insert @vlfcounts
values(@dbname, @vlfs)
delete from @databases where dbname = @dbname
end --while
end
else
begin
-- SQL Server 2012+ adds a leading RecoveryUnitId column to DBCC LOGINFO,
-- hence the second capture table with a different shape.
declare @dbccloginfo2012 table
(
RecoveryUnitId int,
fileid smallint,
file_size bigint,
start_offset bigint,
fseqno int,
[status] tinyint,
parity tinyint,
create_lsn numeric(25,0)
)
while exists(select top 1 dbname from @databases)
begin
set @dbname = (select top 1 dbname from @databases)
set @query = 'dbcc loginfo (' + '''' + @dbname + ''') '
insert into @dbccloginfo2012
exec (@query)
set @vlfs = @@rowcount
insert @vlfcounts
values(@dbname, @vlfs)
delete from @databases where dbname = @dbname
end --while
end
--output the full list
select dbname, vlfcount
from @vlfcounts
order by dbname
| |
Create indexes for tables with a FK to job.id | -- Speed up joins/lookups on the job foreign-key columns.
CREATE INDEX log_job_id on log (job_id);
CREATE INDEX performance_job_id on performance (job_id);
-- NOTE: the output table's job FK column is named oq_job_id, not job_id.
CREATE INDEX output_job_id on output (oq_job_id);
| |
Add procedure occurrence dimension with ICD-9-CM | /* Build procedure occurrence dimension table. This uses the ICD-9-CM
* vocabulary. */
-- NOTE(review): mixed dialect — #temp tables and @cdm_schema look like
-- OHDSI/SqlRender templating, while ~ / ~* are Postgres regex operators; confirm
-- the intended target engine before running directly.
INSERT INTO #dim
SELECT
cp.person_id,
-- Covariate id = 3 characters after the leading "V" of the dot-stripped code.
SUBSTRING(REPLACE(sm.source_code, '.', '') FROM 2 FOR 3) AS covariate_id,
COUNT(SUBSTRING(REPLACE(sm.source_code, '.', '') FROM 2 FOR 3))
AS covariate_count
FROM
#cohort_person cp INNER JOIN @cdm_schema.dbo.procedure_occurrence po
ON cp.person_id = po.person_id
INNER JOIN vocabulary.source_to_concept_map sm
ON po.procedure_concept_id = sm.target_concept_id
INNER JOIN vocabulary.vocabulary v
ON sm.source_vocabulary_id = v.vocabulary_id
WHERE
-- Procedures strictly after cohort start, up to and including cohort end.
cp.cohort_start_date < po.procedure_date
AND po.procedure_date <= cp.cohort_end_date
AND v.vocabulary_name ~* 'ICD-9-C.*'
-- Only ICD-9 V-codes (supplementary classification) are counted.
AND sm.source_code ~ '^V[0-9\.]*$'
GROUP BY
cp.person_id,
covariate_id
;
| |
Add script for reversing incorrectly-set project lat/long coordinates | -- Reverses project lat/long coordinates, as they were set in reversed order initially
UPDATE civictechprojects_project
SET project_location_coords =
ST_SetSRID(ST_MakePoint(ST_Y(project_location_coords), ST_X(project_location_coords)), 4326);
| |
Create parameter protocolProjectEndDateNumberOfYears for creating proposal development from IRB protocol | -- Oracle: SYS_GUID() supplies OBJ_ID; the trailing "/" executes the statement in SQL*Plus-style scripts.
INSERT INTO KRCR_PARM_T (NMSPC_CD, CMPNT_CD, PARM_NM, OBJ_ID, VER_NBR, PARM_TYP_CD, VAL, PARM_DESC_TXT, EVAL_OPRTR_CD, APPL_ID)
VALUES ('KC-PROTOCOL', 'Document', 'protocolProjectEndDateNumberOfYears', SYS_GUID(), 1, 'CONFG', '5', 'Number of Years for Proposal Development Project End Date', 'A', 'KUALI')
/
| |
Make use of language support for pxweb | -- Point the Tilastokeskus datasource at the {language}-parameterised PxWeb API
-- URL; the {language} placeholder is presumably substituted per request by
-- Oskari's pxweb adapter — confirm against the adapter code.
UPDATE oskari_statistical_datasource SET config='{
"url": "https://pxnet2.stat.fi/pxweb/api/v1/{language}/Kuntien_avainluvut/2018/kuntien_avainluvut_2018_aikasarja.px",
"info": {
"url": "http://www.tilastokeskus.fi"
},
"regionKey": "Alue 2018",
"indicatorKey": "Tiedot",
"hints" : {
"dimensions" : [ {
"id" : "Vuosi",
"sort" : "DESC"
}]
}
}' where locale LIKE '%Tilastokeskus%'; | |
Create a summary table of the extents of the hocr areas | /*
We're going to introduce some data quality here that needs to be
carried through when using this table:
- text with less than 2 characters is skipped
- images with x or y < 100 are skipped
- words whose x0, x1, y0, or y1 are within 10 of the edge of the image
are skipped
*/
-- MySQL CREATE TABLE ... SELECT: column names/types are inferred from the query.
-- NOTE(review): x and y are presumably the page-image dimensions in
-- hocr_results — confirm against that table's schema.
CREATE TABLE
text_extent
SELECT
occid,
min(area_x0) as min_area_x0,
min(area_y0) as min_area_y0,
max(area_x1) as max_area_x1,
max(area_y1) as max_area_y1,
min(line_x0) as min_line_x0,
min(line_y0) as min_line_y0,
max(line_x1) as max_line_x1,
max(line_y1) as max_line_y1,
min(word_x0) as min_word_x0,
min(word_y0) as min_word_y0,
max(word_x1) as max_word_x1,
max(word_y1) as max_word_y1
FROM
hocr_results
WHERE
x > 100
AND y > 100
AND area_x0 > 10
AND area_y0 > 10
AND area_x1 < (x - 10)
AND area_y1 < (y - 10)
AND line_x0 > 10
AND line_y0 > 10
AND line_x1 < (x - 10)
AND line_y1 < (y - 10)
AND word_x0 > 10
AND word_y0 > 10
AND word_x1 < (x - 10)
AND word_y1 < (y - 10)
AND LENGTH(TRIM(text))>2
GROUP BY
occid;
| |
Add the ship changes script for current round. | UPDATE smr_new.ship_type SET cost = 12026598 WHERE ship_type_id = 21; -- Federal Warrant to 12,026,598 credits
UPDATE smr_new.ship_type SET cost = 23675738 WHERE ship_type_id = 22; -- Federal Ultimatum to 23,675,738 credits
UPDATE smr_new.ship_type SET cost = 7483452 WHERE ship_type_id = 24; -- Assassin to 7,483,452 credits
UPDATE smr_new.ship_type SET cost = 19890100 WHERE ship_type_id = 25; -- Death Cruiser to 19,890,100 credits | |
Add the SQL for the hello world example | CREATE TABLE HELLOWORLD (
HELLO VARCHAR(15),
WORLD VARCHAR(15),
DIALECT VARCHAR(15) NOT NULL,
PRIMARY KEY (DIALECT)
);
-- VoltDB-specific DDL: partition rows across the cluster by DIALECT.
PARTITION TABLE HELLOWORLD ON COLUMN DIALECT;
-- Single-statement stored procedures, partitioned on the same column so each
-- call runs single-sited on the partition owning that DIALECT value.
CREATE PROCEDURE Insert PARTITION ON TABLE Helloworld COLUMN Dialect
AS INSERT INTO HELLOWORLD (Dialect, Hello, World) VALUES (?, ?, ?);
CREATE PROCEDURE Select PARTITION ON TABLE Helloworld COLUMN Dialect
AS SELECT HELLO, WORLD FROM HELLOWORLD WHERE DIALECT = ?;
| |
Add ITN roads view (roadlink with roadname and road.fid) | -- Lookup between road and roadlink on fid
-- Postgres: unnest() expands the networkmember_href array to one row per
-- member; the leading '#' of each href is stripped so it matches roadlink.fid.
CREATE OR REPLACE VIEW osmm_itn.road_roadlink AS
SELECT road_fid,
replace(roadlink_fid, '#', '') AS roadlink_fid
FROM
(SELECT fid AS road_fid,
unnest(networkmember_href) AS roadlink_fid
FROM osmm_itn.road) AS a;
-- Each roadlink with associated roadname(s) and fid of road in case more info
-- is required
-- LEFT JOINs keep roadlinks with no matching road (roadname/road_fid NULL).
CREATE OR REPLACE VIEW osmm_itn.roads AS
SELECT array_to_string(road.roadname, ', ', '') AS roadname,
road.fid AS road_fid,
roadlink.*
FROM osmm_itn.roadlink AS roadlink
LEFT JOIN osmm_itn.road_roadlink AS road_roadlink ON (roadlink.fid = road_roadlink.roadlink_fid)
LEFT JOIN osmm_itn.road AS road ON (road_roadlink.road_fid = road.fid);
| |
Test script for Niels' newly introduced code + bugs | SET auto_commit=true;
-- In auto-commit mode there is no open transaction, so each transaction-control
-- statement below is expected to be rejected by the server.
-- the following statements should all fail
ROLLBACK;
COMMIT;
SAVEPOINT failingsavepoint;
-- this one is incorrect since the savepoint should not exist
-- however, the server might answer with an error about auto_commit
-- for that might be cheaper
RELEASE SAVEPOINT failingsavepoint;
| |
Store procedure for insert data into bioassay_data_data_category | -- insert data into bioassay_data_data_category
-- Oracle PL/SQL anonymous block: number each distinct category of a derived
-- bioassay datum (0, 1, 2, ...) and insert one row per (datum, category) pair.
declare
cursor c_bioassay_data_category is
select chara.NAME name, data.DERIVED_BIOASSAY_DATA_PK_ID id, data.CATEGORY category
from DERIVED_BIOASSAY_DATA data, CHARACTERIZATION chara
where data.CHARACTERIZATION_PK_ID = chara.CHARACTERIZATION_PK_ID
and data.CATEGORY is not null
and data.CATEGORY != 'Image'
and data.CATEGORY != 'Graph'
group by (data.DERIVED_BIOASSAY_DATA_PK_ID, data.CATEGORY, chara.NAME);
v_data_pk_id number;
v_category_index number;
begin
v_category_index := 0;
v_data_pk_id := 0;
-- NOTE(review): the index-reset logic below assumes all rows for one id arrive
-- contiguously, but the cursor has GROUP BY without ORDER BY, which does not
-- guarantee row order in Oracle — consider adding an explicit ORDER BY id.
for i in c_bioassay_data_category loop
if i.id != v_data_pk_id then
v_data_pk_id := i.id;
v_category_index := 0;
end if;
insert into bioassay_data_data_category
(derived_bioassay_data_pk_id, category_index, category_name, update_date)
values (v_data_pk_id, v_category_index, i.category, sysdate);
v_category_index := v_category_index + 1;
end loop;
end;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.