diff --git a/.circleci/config.yml b/.circleci/config.yml index f2374cd0a1a122b74312a60efec2331b5d82fb46..7ead624eece7db261949b2d6fb4531e2bd022f12 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -96,6 +96,7 @@ jobs: test/results/*/dump/ \ ; docker compose stop --timeout=1 oracle23 + docker compose stop --timeout=1 mysql docker compose down -v docker image ls --all --quiet dalibo/buildpack-mig | xargs docker rmi --force docker image ls --all --quiet mysql | xargs docker rmi --force diff --git a/docker-compose.yml b/docker-compose.yml index 96c93899716bf576615ebf191d1436c9e73800f1..b63bd55182a7d93f6d87576c49bbd939fd48366d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -80,6 +80,7 @@ services: working_dir: /workspace mysql: + profiles: [ci] image: mysql:8.4 volumes: - .:/pg_migrate/ @@ -118,6 +119,18 @@ services: --tmp-table-size=1K, ] + mariadb: + image: mariadb:12 + volumes: + - .:/pg_migrate/ + - ./test/fixtures/mariadb/:/docker-entrypoint-initdb.d/ + ports: ["3308:3306"] + environment: + MARIADB_ROOT_PASSWORD: N0tSecret + MARIADB_USER: sakila + MARIADB_DATABASE: sakila + MARIADB_PASSWORD: N0tSecret + oracle23: profiles: [ci] # prevent starting without explicit profile ports: ["1521:1521"] @@ -153,6 +166,7 @@ services: entrypoint: /pg_migrate/test/docker/oracle-entrypoint.sh oracle11: + profiles: [ci] image: gvenzl/oracle-xe:11-slim ports: ["1521:1521"] working_dir: /pg_migrate/ diff --git a/docs/references/conversion.md b/docs/references/conversion.md index 7cfa8f41ad4dcd53cac512fce6dd80f4de24c786..5f600a102aa631026e04f099ca2156a5ec433125 100644 --- a/docs/references/conversion.md +++ b/docs/references/conversion.md @@ -39,11 +39,13 @@ PostgreSQL Migrator implements column type conversion using the following mappin |------------------|---------------------| | `tinyint(1)` | `boolean` | | `decimal` | `numeric` | - | `year`, `tinyint`, `tinyint unsigned` | `smallint` | - | `smallint unsigned` | `int` | - | `mediumint`, `mediumint 
unsigned` | `int` | - | `int unsigned` | `bigint` | - | `bigint unsigned` | `numeric(20, 0)` | + | `year`, `year(n)`, `smallint(n)`, `tinyint`, `tinyint(n)`, `tinyint unsigned`, `tinyint(n) unsigned` | `smallint` | + | `smallint unsigned`, `smallint(n) unsigned` | `int` | + | `mediumint`, `mediumint(n)`, `mediumint unsigned`, `mediumint(n) unsigned` | `int` | + | `int(n)` | `int` | + | `int unsigned`, `int(n) unsigned`| `bigint` | + | `bigint(n)` | `bigint` | + | `bigint unsigned`, `bigint(n) unsigned` | `numeric(20, 0)` | | `datetime` | `timestamp` | | `enum` | `text` | | `set` | `text[]` | diff --git a/docs/references/features.md b/docs/references/features.md index 61462eec34f2c5d7e5f0f24bb0999d59e0a2bf2c..eb54936331a90e3ac89af67178f11b64f0eb3a97 100644 --- a/docs/references/features.md +++ b/docs/references/features.md @@ -65,7 +65,7 @@ Here are the implementattion status of the different systems and versions. | System | Support | |----------------------------|------------| | MySQL 8.4 | 🟢 `FI` | - | MariaDB | ⚪ `NP` | + | MariaDB 10.5 to 12 | 🟠 `PI` | === "Other" diff --git a/internal/convert/datatype.go b/internal/convert/datatype.go index a53ce990c4a76ce38fd5368fa4cdc52156873946..ca68838215dc73353c12a64d42c7383839714503 100644 --- a/internal/convert/datatype.go +++ b/internal/convert/datatype.go @@ -114,15 +114,26 @@ func init() { "bit(n)": "bit varying(n)", - "decimal(...)": "numeric(...)", - "tinyint": "smallint", - "tinyint unsigned": "smallint", - "year": "smallint", - "smallint unsigned": "integer", - "mediumint": "integer", - "mediumint unsigned": "integer", - "int unsigned": "bigint", - "bigint unsigned": "numeric(20,0)", + "decimal(...)": "numeric(...)", + "tinyint": "smallint", + "tinyint(n)": "smallint", + "tinyint unsigned": "smallint", + "tinyint(n) unsigned": "smallint", + "year": "smallint", + "year(n)": "smallint", + "smallint(n)": "smallint", + "smallint unsigned": "integer", + "smallint(n) unsigned": "integer", + "mediumint": "integer", + 
"mediumint(n)": "integer", + "mediumint unsigned": "integer", + "mediumint(n) unsigned": "integer", + "int(n)": "integer", + "int unsigned": "bigint", + "int(n) unsigned": "bigint", + "bigint(n)": "bigint", + "bigint unsigned": "numeric(20,0)", + "bigint(n) unsigned": "numeric(20,0)", "binary_double": "double precision", "double": "double precision", diff --git a/internal/fetch/copy.go b/internal/fetch/copy.go index 9a09ab00124a997d424310d67e2aa556b7706923..102dc6e0f7671446bf898c8280e921738f40871e 100644 --- a/internal/fetch/copy.go +++ b/internal/fetch/copy.go @@ -68,10 +68,6 @@ func CopyTables(p *dispatch.Plan, tables []catalog.Table, srcModel any) { } continue } - src := catalog.Find[catalog.Table](srcModel, convert.SourceMap[t.ObjectPath]) - if src.IsZero() { - panic("bad source table") - } var reqs []string if project.Current.DumpParams.PreData { reqs = append(reqs, t.BuildPath("create")) @@ -79,7 +75,7 @@ func CopyTables(p *dispatch.Plan, tables []catalog.Table, srcModel any) { p.Add(catalog.TruncateTask(t, reqs)) reqs = []string{t.BuildPath("truncate")} } - p.Add(CopyTask(src, t, reqs)) + p.Add(CopyTask(srcModel, t, reqs)) TableCount++ } } @@ -89,31 +85,37 @@ func CopyTables(p *dispatch.Plan, tables []catalog.Table, srcModel any) { // Task weight is source table row count. // Generates a COPY statement for each batch of rows. // Use SetupExecute to configure query output. 
-func CopyTask(source, target catalog.Table, reqs []string) dispatch.Task { +func CopyTask(source any, target catalog.Table, reqs []string) dispatch.Task { + srcTable := catalog.Find[catalog.Table](source, convert.SourceMap[target.ObjectPath]) + if srcTable.IsZero() { + panic("bad source table") + } t := ©Task{ Header: dispatch.Header{ Id: target.BuildPath("copy"), - Priority: source.Priority(), + Priority: srcTable.Priority(), Reqs: reqs, }, SerialNumber: dispatch.Next(), source: source, + srcTable: srcTable, target: target, - progressWeight: max(1, int64(source.Rows)), + progressWeight: max(1, int64(srcTable.Rows)), } - if source.Rows > 1 && source.Size > 1 { - avgRowSize := source.Size / source.Rows + if srcTable.Rows > 1 && srcTable.Size > 1 { + avgRowSize := srcTable.Size / srcTable.Rows t.inlineLOB = avgRowSize < 65636 } data := make(map[string]any) - data["Schema"] = t.source.Schema - data["Name"] = t.source.Name + data["Source"] = source + data["Schema"] = srcTable.Schema + data["Name"] = srcTable.Name var columns []catalog.Column for _, c := range t.target.Columns { p := convert.SourceMap[c.ObjectPath] - columns = append(columns, t.source.Column(p)) + columns = append(columns, srcTable.Column(p)) } data["Columns"] = columns data["InlineLOB"] = t.inlineLOB @@ -130,8 +132,9 @@ type copyTask struct { dispatch.Header SerialNumber int logName string // unescaped table name for logging - source catalog.Table + source any query string + srcTable catalog.Table target catalog.Table inlineLOB bool factories []processorFactory // Process a column for copy printing. 
@@ -201,13 +204,12 @@ func (t *copyTask) Run(ctx context.Context) error { } }, } - for i, c := range t.target.Columns { - src := t.source.Column(convert.SourceMap[c.ObjectPath]) + src := t.srcTable.Column(convert.SourceMap[c.ObjectPath]) if !src.LOB() || t.inlineLOB { t.factories[i] = Value( src.ObjectPath, - t.source.Columns[i].Type.Name, + t.srcTable.Columns[i].Type.Name, t.target.Columns[i].Type.Name, ) continue @@ -216,7 +218,8 @@ func (t *copyTask) Run(ctx context.Context) error { lobs := newLobFetcher( src.ObjectPath, Render("select-lob-chunks", map[string]any{ - "Table": t.source, + "Source": t.source, + "Table": t.srcTable, "Column": src, }), ) @@ -339,11 +342,12 @@ func (t *copyTask) batcher(ctx context.Context) { // We buffer rows enough to fill the 64k pgx buffer. buffer := 32 // Number of rows to buffer. - if t.source.Size > 0 && t.source.Rows > 0 { + + if t.srcTable.Size > 0 && t.srcTable.Rows > 0 { // We have table stats, adjust to control memory usage or to prefetch more rows. // Actually, since LOB chunks are streamed in a low buffer chan, // row bigger than 64k are not fully fetched and stored in RAM. 
- buffer = 2 + 65635/(t.source.Size/t.source.Rows) + buffer = 2 + 65635/(t.srcTable.Size/t.srcTable.Rows) } if t.rows == nil { panic("nil rows chan") diff --git a/internal/mysql/inspect.go b/internal/mysql/inspect.go index f30c2ff65bb33377f2b2e52c05bd6e6183d2349e..61325f3dc67c5337522f234f709768b734856a60 100644 --- a/internal/mysql/inspect.go +++ b/internal/mysql/inspect.go @@ -168,7 +168,7 @@ func BuildRegexp(patterns []string) string { if pos > 0 { regexp += "|" } - regexp += "^" + strings.ReplaceAll(schema, "%", "*") + regexp += "^" + strings.ReplaceAll(schema, "%", ".*") } return regexp } diff --git a/internal/mysql/inspect_test.go b/internal/mysql/inspect_test.go index 6e01863c41479688b65b8fb33d6ba27bf47eaf41..bf62d90dd62d8e2edc8838961fc6fb3dd76559a4 100644 --- a/internal/mysql/inspect_test.go +++ b/internal/mysql/inspect_test.go @@ -10,7 +10,7 @@ import ( func TestBuildRegexp(t *testing.T) { r := require.New(t) patterns := []string{"%"} - r.Equal("^*", mysql.BuildRegexp(patterns)) + r.Equal("^.*", mysql.BuildRegexp(patterns)) patterns = []string{"foo", "bar"} r.Equal("^foo|^bar", mysql.BuildRegexp(patterns)) } diff --git a/internal/mysql/model.go b/internal/mysql/model.go index c6fdfa49d6cdbbcf52bd22d48fdb01cb0f987a4e..cac6e12f44740cec015a6068e6c8882eab0191e9 100644 --- a/internal/mysql/model.go +++ b/internal/mysql/model.go @@ -3,6 +3,7 @@ package mysql import ( "embed" "io/fs" + "strings" _ "github.com/go-sql-driver/mysql" "gitlab.com/dalibo/pg_migrate/internal/audit" @@ -51,3 +52,7 @@ func New() *model { func (mdl *model) IsZero() bool { return mdl == nil || len(mdl.Schemas) == 0 } + +func (mdl *model) Maria() bool { + return strings.Contains(strings.ToLower(mdl.Metadata.Software), "mariadb") +} diff --git a/internal/mysql/sql/checks.sql b/internal/mysql/sql/checks.sql index 5a6a755326bed3203e463535ebaa0fc368811615..33c16a27f72c57527ff7f05831bf6c70da17feec 100644 --- a/internal/mysql/sql/checks.sql +++ b/internal/mysql/sql/checks.sql @@ -1,6 +1,6 @@ 
-SELECT table_schema, table_name, constraint_name, check_clause +SELECT cons.table_schema, cons.table_name, cons.constraint_name, cond.check_clause FROM information_schema.table_constraints cons JOIN information_schema.check_constraints cond USING (constraint_schema, constraint_name) - WHERE constraint_schema IN ({{ template "stringarray.sql" .Schemas }}) - ORDER BY table_schema, table_name, constraint_name; + WHERE cons.constraint_schema IN ({{ template "stringarray.sql" .Schemas }}) + ORDER BY cons.table_schema, cons.table_name, cons.constraint_name; diff --git a/internal/mysql/sql/columns.sql b/internal/mysql/sql/columns.sql index 87615bbdcad74ebc202ee93e82d03f5f456b71cc..1496b1f298fabbb1902b7cbe2e60084d5201f73b 100644 --- a/internal/mysql/sql/columns.sql +++ b/internal/mysql/sql/columns.sql @@ -10,6 +10,7 @@ SELECT c.table_schema, c.table_name, column_name, column_comment, WHEN 'VIRTUAL GENERATED' THEN 'VIRTUAL' WHEN 'auto_increment' THEN 'ALWAYS AS IDENTITY' WHEN 'DEFAULT_GENERATED on update CURRENT_TIMESTAMP' THEN 'ON UPDATE CURRENT_TIMESTAMP' + WHEN 'on update current_timestamp()' THEN 'ON UPDATE CURRENT_TIMESTAMP' ELSE 'NEVER' END AS "generated" FROM information_schema.columns AS c diff --git a/internal/mysql/sql/indexes-columns.sql b/internal/mysql/sql/indexes-columns.sql index 6572c0a4920104767499cfb4a66206874fbc4d09..316be19651c9eac8cf9cd4ecdd308c3022132680 100644 --- a/internal/mysql/sql/indexes-columns.sql +++ b/internal/mysql/sql/indexes-columns.sql @@ -1,6 +1,10 @@ SELECT index_schema, table_name, index_name, column_name, + {{- if .Maria }} + '' AS expression, + {{- else }} IFNULL(expression, '') AS expression, + {{- end }} (IFNULL(collation, '') = 'D') AS descend, '' AS collation FROM information_schema.statistics diff --git a/internal/mysql/sql/indexes.sql b/internal/mysql/sql/indexes.sql index 19084b2792557d46c20b9f0bff5381f98de33c08..8bbae6668a3dcfdfef755f5766ed9afd8ce18583 100644 --- a/internal/mysql/sql/indexes.sql +++ 
b/internal/mysql/sql/indexes.sql @@ -4,7 +4,15 @@ SELECT DISTINCT NOT non_unique AS uniqueness, index_type, index_comment, + {{- if .Maria }} + {{- if .Before "10.6"}} + false as invisible + {{- else }} + ignored = 'YES' AS invisible + {{- end }} + {{- else }} is_visible = 'NO' AS invisible + {{- end }} FROM information_schema.statistics WHERE index_name <> 'PRIMARY' AND index_schema IN ({{ template "stringarray.sql" .Schemas }}) diff --git a/internal/mysql/sql/select-lob-chunks.sql b/internal/mysql/sql/select-lob-chunks.sql index 117f41177ce8796c677c9d0e58c1e6b41edfd026..a9f741db3238487ced2e2f15cb29fc1f7d549540 100644 --- a/internal/mysql/sql/select-lob-chunks.sql +++ b/internal/mysql/sql/select-lob-chunks.sql @@ -9,7 +9,11 @@ WITH RECURSIVE SELECT prior.value + 131072 AS value, prior.level + 1 AS level FROM offsets AS prior + {{- if .Source.Maria }} + WHERE prior.level < @@max_recursive_iterations + {{- else }} WHERE prior.level < @@cte_max_recursion_depth + {{- end }} ), `longs` AS ( SELECT row_number() OVER () AS rownum, @@ -19,7 +23,11 @@ WITH RECURSIVE FROM {{ identifier .Table.Schema .Table.Name }} USE INDEX () ) -- 16384 * 128k = 2GB +{{- if .Source.Maria }} +SELECT /*+ SET_VAR(max_recursive_iterations = 16384) */ +{{- else }} SELECT /*+ SET_VAR(cte_max_recursion_depth = 16384) */ +{{- end }} rownum, `len`, substring(`longs`.`value`, 1 + `offsets`.`value`, 131072) AS `chunk` FROM `longs` diff --git a/test/docker/mysql-big.sql b/test/docker/mysql-big.sql index ddd8bcc2f0aa96bfe519fb55b08a731d23f3ec64..6ce0389e3d64094e6713c8a57ef6903ea1049e6f 100644 --- a/test/docker/mysql-big.sql +++ b/test/docker/mysql-big.sql @@ -1,3 +1,5 @@ +SET SESSION cte_max_recursion_depth = 10000; + DROP TABLE IF EXISTS xbigtable; CREATE TABLE xbigtable ( id int AUTO_INCREMENT PRIMARY KEY, diff --git a/test/fixtures/mariadb/00-extra.sql b/test/fixtures/mariadb/00-extra.sql new file mode 100644 index 0000000000000000000000000000000000000000..c77f480383e0539d77034659bb70db822dcfe377 
--- /dev/null +++ b/test/fixtures/mariadb/00-extra.sql @@ -0,0 +1,81 @@ +CREATE TABLE x_table ( + id SMALLINT UNSIGNED NOT NULL UNIQUE, + name VARCHAR(50) NOT NULL, + upper_name VARCHAR(50) GENERATED ALWAYS AS (UPPER(name)) VIRTUAL, + lower_name VARCHAR(50) GENERATED ALWAYS AS (LOWER(name)) STORED, + CONSTRAINT CHK_MixedCase_id CHECK (id BETWEEN 1 AND 10), + CONSTRAINT x_table_uk UNIQUE (id, name) +); + +CREATE TABLE x_table_composite_fk ( + x_table_id SMALLINT UNSIGNED NOT NULL UNIQUE, + x_table_name VARCHAR(50) NOT NULL, + CONSTRAINT x_composite_table_pk FOREIGN KEY (x_table_id, x_table_name) REFERENCES x_table (id, name) +); + +CREATE VIEW x_view AS SELECT /*+ BKA(x_table) */ * FROM x_table USE INDEX (x_table_uk) IGNORE INDEX(x_table_uk); + +CREATE INDEX x_invisible_idx ON x_table (id) IGNORED; + +CREATE TABLE part_range ( + id SMALLINT NOT NULL, + year INTEGER NOT NULL, + month INTEGER, + CONSTRAINT pk_part_range PRIMARY KEY (year, id) +) +PARTITION BY RANGE (year) +( + PARTITION p_past VALUES LESS THAN (2019), + PARTITION p_2019 VALUES LESS THAN (2020), + PARTITION p_2020 VALUES LESS THAN (2021), + PARTITION p_current VALUES LESS THAN (MAXVALUE) +); + +INSERT INTO part_range (id, year) VALUES (1, 2019); + +CREATE TABLE part_list_sub ( + id SMALLINT NOT NULL, + region VARCHAR(20) NOT NULL +) +PARTITION BY LIST COLUMNS (region) +SUBPARTITION BY HASH (id) +SUBPARTITIONS 2 +( + PARTITION p_east VALUES IN ('EAST', 'WEST'), + PARTITION p_north VALUES IN ('NORTH'), + PARTITION p_south VALUES IN ('SOUTH') +); + +INSERT INTO part_list_sub (id, region) VALUES (1, 'EAST'); + +CREATE EVENT IF NOT EXISTS myjob +ON SCHEDULE EVERY 1 DAY +STARTS NOW() +DO + SELECT 1; + +CREATE EVENT IF NOT EXISTS myjob_expired +ON SCHEDULE EVERY 1 SECOND +STARTS NOW() +ENDS NOW() + INTERVAL 1 SECOND +ON COMPLETION PRESERVE +DO + SELECT 1; + +CREATE TABLE x_table_csv( + id SMALLINT UNSIGNED NOT NULL, + name VARCHAR(50) NOT NULL +) +ENGINE=CSV; + +CREATE TABLE x_null_empty( + id SMALLINT 
UNSIGNED NOT NULL UNIQUE, + t VARCHAR(10) +); + +-- data to test null vs empty string conversion. +INSERT INTO x_null_empty VALUES +(1, ''), +(2, NULL); + +GRANT SELECT ON mysql.servers TO 'sakila'; diff --git a/test/fixtures/mariadb/00-sakila-schema.sql b/test/fixtures/mariadb/00-sakila-schema.sql new file mode 100644 index 0000000000000000000000000000000000000000..1065c2bb2f33d48f1003fff01728edef3e829b37 --- /dev/null +++ b/test/fixtures/mariadb/00-sakila-schema.sql @@ -0,0 +1,637 @@ +-- Sakila Sample Database Schema +-- Version 0.8 + +-- Copyright (c) 2006, MySQL AB +-- All rights reserved. + +-- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +-- * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +-- * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +-- * Neither the name of MySQL AB nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0; +SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0; +SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL'; + +-- +-- Table structure for table `actor` +-- + +CREATE TABLE actor ( + actor_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + first_name VARCHAR(45) NOT NULL, + last_name VARCHAR(45) NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (actor_id), + KEY idx_actor_last_name (last_name) +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `address` +-- + +CREATE TABLE address ( + address_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + address VARCHAR(50) NOT NULL, + address2 VARCHAR(50) DEFAULT NULL, + district VARCHAR(20) NOT NULL, + city_id SMALLINT UNSIGNED NOT NULL, + postal_code VARCHAR(10) DEFAULT NULL, + phone VARCHAR(20) NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (address_id), + KEY idx_fk_city_id (city_id), + CONSTRAINT `fk_address_city` FOREIGN KEY (city_id) REFERENCES city (city_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `category` +-- + +CREATE TABLE category ( + category_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT, + name VARCHAR(25) NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY 
(category_id) +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `city` +-- + +CREATE TABLE city ( + city_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + city VARCHAR(50) NOT NULL, + country_id SMALLINT UNSIGNED NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (city_id), + KEY idx_fk_country_id (country_id), + CONSTRAINT `fk_city_country` FOREIGN KEY (country_id) REFERENCES country (country_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `country` +-- + +CREATE TABLE country ( + country_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + country VARCHAR(50) NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (country_id) +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `customer` +-- + +CREATE TABLE customer ( + customer_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + store_id TINYINT UNSIGNED NOT NULL, + first_name VARCHAR(45) NOT NULL, + last_name VARCHAR(45) NOT NULL, + email VARCHAR(50) DEFAULT NULL, + address_id SMALLINT UNSIGNED NOT NULL, + active BOOLEAN NOT NULL DEFAULT TRUE, + create_date DATETIME NOT NULL, + last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (customer_id), + KEY idx_fk_store_id (store_id), + KEY idx_fk_address_id (address_id), + KEY idx_last_name (last_name), + CONSTRAINT fk_customer_address FOREIGN KEY (address_id) REFERENCES address (address_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_customer_store FOREIGN KEY (store_id) REFERENCES store (store_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `film` +-- + +CREATE TABLE film ( + film_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + title VARCHAR(255) NOT NULL, + description TEXT DEFAULT NULL, + release_year YEAR DEFAULT NULL, + language_id 
TINYINT UNSIGNED NOT NULL, + original_language_id TINYINT UNSIGNED DEFAULT NULL, + rental_duration TINYINT UNSIGNED NOT NULL DEFAULT 3, + rental_rate DECIMAL(4,2) NOT NULL DEFAULT 4.99, + length SMALLINT UNSIGNED DEFAULT NULL, + replacement_cost DECIMAL(5,2) NOT NULL DEFAULT 19.99, + rating ENUM('G','PG','PG-13','R','NC-17') DEFAULT 'G', + special_features SET('Trailers','Commentaries','Deleted Scenes','Behind the Scenes') DEFAULT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (film_id), + KEY idx_title (title), + KEY idx_fk_language_id (language_id), + KEY idx_fk_original_language_id (original_language_id), + CONSTRAINT fk_film_language FOREIGN KEY (language_id) REFERENCES language (language_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_film_language_original FOREIGN KEY (original_language_id) REFERENCES language (language_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `film_actor` +-- + +CREATE TABLE film_actor ( + actor_id SMALLINT UNSIGNED NOT NULL, + film_id SMALLINT UNSIGNED NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (actor_id,film_id), + KEY idx_fk_film_id (`film_id`), + CONSTRAINT fk_film_actor_actor FOREIGN KEY (actor_id) REFERENCES actor (actor_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_film_actor_film FOREIGN KEY (film_id) REFERENCES film (film_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `film_category` +-- + +CREATE TABLE film_category ( + film_id SMALLINT UNSIGNED NOT NULL, + category_id TINYINT UNSIGNED NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (film_id, category_id), + CONSTRAINT fk_film_category_film FOREIGN KEY (film_id) REFERENCES film (film_id) ON DELETE RESTRICT ON UPDATE CASCADE, + 
CONSTRAINT fk_film_category_category FOREIGN KEY (category_id) REFERENCES category (category_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `film_text` +-- + +CREATE TABLE film_text ( + film_id SMALLINT NOT NULL, + title VARCHAR(255) NOT NULL, + description TEXT, + PRIMARY KEY (film_id), + FULLTEXT KEY idx_title_description (title,description) +)ENGINE=MyISAM DEFAULT CHARSET=utf8; + +-- +-- Triggers for loading film_text from film +-- + +DELIMITER ;; +CREATE TRIGGER `ins_film` AFTER INSERT ON `film` FOR EACH ROW BEGIN + INSERT INTO film_text (film_id, title, description) + VALUES (new.film_id, new.title, new.description); + END;; + + +CREATE TRIGGER `upd_film` AFTER UPDATE ON `film` FOR EACH ROW BEGIN + IF (old.title != new.title) or (old.description != new.description) + THEN + UPDATE film_text + SET title=new.title, + description=new.description, + film_id=new.film_id + WHERE film_id=old.film_id; + END IF; + END;; + + +CREATE TRIGGER `del_film` AFTER DELETE ON `film` FOR EACH ROW BEGIN + DELETE FROM film_text WHERE film_id = old.film_id; + END;; + +DELIMITER ; + +-- +-- Table structure for table `inventory` +-- + +CREATE TABLE inventory ( + inventory_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT, + film_id SMALLINT UNSIGNED NOT NULL, + store_id TINYINT UNSIGNED NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (inventory_id), + KEY idx_fk_film_id (film_id), + KEY idx_store_id_film_id (store_id,film_id), + CONSTRAINT fk_inventory_store FOREIGN KEY (store_id) REFERENCES store (store_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_inventory_film FOREIGN KEY (film_id) REFERENCES film (film_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `language` +-- + +CREATE TABLE language ( + language_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT, + name CHAR(20) NOT NULL, + 
last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (language_id) +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `payment` +-- + +CREATE TABLE payment ( + payment_id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT, + customer_id SMALLINT UNSIGNED NOT NULL, + staff_id TINYINT UNSIGNED NOT NULL, + rental_id INT DEFAULT NULL, + amount DECIMAL(5,2) NOT NULL, + payment_date DATETIME NOT NULL, + last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (payment_id), + KEY idx_fk_staff_id (staff_id), + KEY idx_fk_customer_id (customer_id), + CONSTRAINT fk_payment_rental FOREIGN KEY (rental_id) REFERENCES rental (rental_id) ON DELETE SET NULL ON UPDATE CASCADE, + CONSTRAINT fk_payment_customer FOREIGN KEY (customer_id) REFERENCES customer (customer_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_payment_staff FOREIGN KEY (staff_id) REFERENCES staff (staff_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +-- +-- Table structure for table `rental` +-- + +CREATE TABLE rental ( + rental_id INT NOT NULL AUTO_INCREMENT, + rental_date DATETIME NOT NULL, + inventory_id MEDIUMINT UNSIGNED NOT NULL, + customer_id SMALLINT UNSIGNED NOT NULL, + return_date DATETIME DEFAULT NULL, + staff_id TINYINT UNSIGNED NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (rental_id), + UNIQUE KEY (rental_date,inventory_id,customer_id), + KEY idx_fk_inventory_id (inventory_id), + KEY idx_fk_customer_id (customer_id), + KEY idx_fk_staff_id (staff_id), + CONSTRAINT fk_rental_staff FOREIGN KEY (staff_id) REFERENCES staff (staff_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_rental_inventory FOREIGN KEY (inventory_id) REFERENCES inventory (inventory_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_rental_customer FOREIGN KEY (customer_id) REFERENCES customer (customer_id) ON DELETE 
RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `staff` +-- + +CREATE TABLE staff ( + staff_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT, + first_name VARCHAR(45) NOT NULL, + last_name VARCHAR(45) NOT NULL, + address_id SMALLINT UNSIGNED NOT NULL, + picture BLOB DEFAULT NULL, + email VARCHAR(50) DEFAULT NULL, + store_id TINYINT UNSIGNED NOT NULL, + active BOOLEAN NOT NULL DEFAULT TRUE, + username VARCHAR(16) NOT NULL, + password VARCHAR(40) BINARY DEFAULT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (staff_id), + KEY idx_fk_store_id (store_id), + KEY idx_fk_address_id (address_id), + CONSTRAINT fk_staff_store FOREIGN KEY (store_id) REFERENCES store (store_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_staff_address FOREIGN KEY (address_id) REFERENCES address (address_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- Table structure for table `store` +-- + +CREATE TABLE store ( + store_id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT, + manager_staff_id TINYINT UNSIGNED NOT NULL, + address_id SMALLINT UNSIGNED NOT NULL, + last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (store_id), + UNIQUE KEY idx_unique_manager (manager_staff_id), + KEY idx_fk_address_id (address_id), + CONSTRAINT fk_store_staff FOREIGN KEY (manager_staff_id) REFERENCES staff (staff_id) ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT fk_store_address FOREIGN KEY (address_id) REFERENCES address (address_id) ON DELETE RESTRICT ON UPDATE CASCADE +)ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- +-- View structure for view `customer_list` +-- + +CREATE VIEW customer_list +AS +SELECT cu.customer_id AS ID, CONCAT(cu.first_name, _utf8' ', cu.last_name) AS name, a.address AS address, a.postal_code AS `zip code`, + a.phone AS phone, city.city AS city, country.country AS country, IF(cu.active, 
_utf8'active',_utf8'') AS notes, cu.store_id AS SID +FROM customer AS cu JOIN address AS a ON cu.address_id = a.address_id JOIN city ON a.city_id = city.city_id + JOIN country ON city.country_id = country.country_id; + +-- +-- View structure for view `film_list` +-- + +CREATE VIEW film_list +AS +SELECT film.film_id AS FID, film.title AS title, film.description AS description, category.name AS category, film.rental_rate AS price, + film.length AS length, film.rating AS rating, GROUP_CONCAT(CONCAT(actor.first_name, _utf8' ', actor.last_name) SEPARATOR ', ') AS actors +FROM category LEFT JOIN film_category ON category.category_id = film_category.category_id LEFT JOIN film ON film_category.film_id = film.film_id + JOIN film_actor ON film.film_id = film_actor.film_id + JOIN actor ON film_actor.actor_id = actor.actor_id +GROUP BY film.film_id; + +-- +-- View structure for view `nicer_but_slower_film_list` +-- + +CREATE VIEW nicer_but_slower_film_list +AS +SELECT film.film_id AS FID, film.title AS title, film.description AS description, category.name AS category, film.rental_rate AS price, + film.length AS length, film.rating AS rating, GROUP_CONCAT(CONCAT(CONCAT(UCASE(SUBSTR(actor.first_name,1,1)), + LCASE(SUBSTR(actor.first_name,2,LENGTH(actor.first_name))),_utf8' ',CONCAT(UCASE(SUBSTR(actor.last_name,1,1)), + LCASE(SUBSTR(actor.last_name,2,LENGTH(actor.last_name)))))) SEPARATOR ', ') AS actors +FROM category LEFT JOIN film_category ON category.category_id = film_category.category_id LEFT JOIN film ON film_category.film_id = film.film_id + JOIN film_actor ON film.film_id = film_actor.film_id + JOIN actor ON film_actor.actor_id = actor.actor_id +GROUP BY film.film_id; + +-- +-- View structure for view `staff_list` +-- + +CREATE VIEW staff_list +AS +SELECT s.staff_id AS ID, CONCAT(s.first_name, _utf8' ', s.last_name) AS name, a.address AS address, a.postal_code AS `zip code`, a.phone AS phone, + city.city AS city, country.country AS country, s.store_id AS SID +FROM staff 
AS s JOIN address AS a ON s.address_id = a.address_id JOIN city ON a.city_id = city.city_id + JOIN country ON city.country_id = country.country_id; + +-- +-- View structure for view `sales_by_store` +-- + +CREATE VIEW sales_by_store +AS +SELECT +CONCAT(c.city, _utf8',', cy.country) AS store +, CONCAT(m.first_name, _utf8' ', m.last_name) AS manager +, SUM(p.amount) AS total_sales +FROM payment AS p +INNER JOIN rental AS r ON p.rental_id = r.rental_id +INNER JOIN inventory AS i ON r.inventory_id = i.inventory_id +INNER JOIN store AS s ON i.store_id = s.store_id +INNER JOIN address AS a ON s.address_id = a.address_id +INNER JOIN city AS c ON a.city_id = c.city_id +INNER JOIN country AS cy ON c.country_id = cy.country_id +INNER JOIN staff AS m ON s.manager_staff_id = m.staff_id +GROUP BY s.store_id +ORDER BY cy.country, c.city; + +-- +-- View structure for view `sales_by_film_category` +-- +-- Note that total sales will add up to >100% because +-- some titles belong to more than 1 category +-- + +CREATE VIEW sales_by_film_category +AS +SELECT +c.name AS category +, SUM(p.amount) AS total_sales +FROM payment AS p +INNER JOIN rental AS r ON p.rental_id = r.rental_id +INNER JOIN inventory AS i ON r.inventory_id = i.inventory_id +INNER JOIN film AS f ON i.film_id = f.film_id +INNER JOIN film_category AS fc ON f.film_id = fc.film_id +INNER JOIN category AS c ON fc.category_id = c.category_id +GROUP BY c.name +ORDER BY total_sales DESC; + +-- +-- View structure for view `actor_info` +-- + +CREATE DEFINER=CURRENT_USER SQL SECURITY INVOKER VIEW actor_info +AS +SELECT +a.actor_id, +a.first_name, +a.last_name, +GROUP_CONCAT(DISTINCT CONCAT(c.name, ': ', + (SELECT GROUP_CONCAT(f.title ORDER BY f.title SEPARATOR ', ') + FROM sakila.film f + INNER JOIN sakila.film_category fc + ON f.film_id = fc.film_id + INNER JOIN sakila.film_actor fa + ON f.film_id = fa.film_id + WHERE fc.category_id = c.category_id + AND fa.actor_id = a.actor_id + ) + ) + ORDER BY c.name SEPARATOR '; ') +AS 
film_info +FROM sakila.actor a +LEFT JOIN sakila.film_actor fa + ON a.actor_id = fa.actor_id +LEFT JOIN sakila.film_category fc + ON fa.film_id = fc.film_id +LEFT JOIN sakila.category c + ON fc.category_id = c.category_id +GROUP BY a.actor_id, a.first_name, a.last_name; + +-- +-- Procedure structure for procedure `rewards_report` +-- + +DELIMITER // + +CREATE DEFINER = 'sakila' PROCEDURE rewards_report ( + IN min_monthly_purchases TINYINT UNSIGNED + , IN min_dollar_amount_purchased DECIMAL(10,2) UNSIGNED + , OUT count_rewardees INT +) +LANGUAGE SQL +NOT DETERMINISTIC +READS SQL DATA +SQL SECURITY DEFINER +COMMENT 'Provides a customizable report on best customers' +proc: BEGIN + + DECLARE last_month_start DATE; + DECLARE last_month_end DATE; + + /* Some sanity checks... */ + IF min_monthly_purchases = 0 THEN + SELECT 'Minimum monthly purchases parameter must be > 0'; + LEAVE proc; + END IF; + IF min_dollar_amount_purchased = 0.00 THEN + SELECT 'Minimum monthly dollar amount purchased parameter must be > $0.00'; + LEAVE proc; + END IF; + + /* Determine start and end time periods */ + SET last_month_start = DATE_SUB(CURRENT_DATE(), INTERVAL 1 MONTH); + SET last_month_start = STR_TO_DATE(CONCAT(YEAR(last_month_start),'-',MONTH(last_month_start),'-01'),'%Y-%m-%d'); + SET last_month_end = LAST_DAY(last_month_start); + + /* + Create a temporary storage area for + Customer IDs. 
+ */ + CREATE TEMPORARY TABLE tmpCustomer (customer_id SMALLINT UNSIGNED NOT NULL PRIMARY KEY); + + /* + Find all customers meeting the + monthly purchase requirements + */ + INSERT INTO tmpCustomer (customer_id) + SELECT p.customer_id + FROM payment AS p + WHERE DATE(p.payment_date) BETWEEN last_month_start AND last_month_end + GROUP BY customer_id + HAVING SUM(p.amount) > min_dollar_amount_purchased + AND COUNT(customer_id) > min_monthly_purchases; + + /* Populate OUT parameter with count of found customers */ + SELECT COUNT(*) FROM tmpCustomer INTO count_rewardees; + + /* + Output ALL customer information of matching rewardees. + Customize output as needed. + */ + SELECT /*+ HINT */ c.* + FROM tmpCustomer AS t + INNER JOIN customer AS c ON t.customer_id = c.customer_id; + + /* Clean up */ + DROP TABLE tmpCustomer; +END // + +DELIMITER ; + +DELIMITER $$ + +CREATE DEFINER = 'sakila' FUNCTION get_customer_balance(p_customer_id INT, p_effective_date DATETIME) RETURNS DECIMAL(5,2) + DETERMINISTIC + READS SQL DATA +BEGIN + + #OK, WE NEED TO CALCULATE THE CURRENT BALANCE GIVEN A CUSTOMER_ID AND A DATE + #THAT WE WANT THE BALANCE TO BE EFFECTIVE FOR. 
THE BALANCE IS: + # 1) RENTAL FEES FOR ALL PREVIOUS RENTALS + # 2) ONE DOLLAR FOR EVERY DAY THE PREVIOUS RENTALS ARE OVERDUE + # 3) IF A FILM IS MORE THAN RENTAL_DURATION * 2 OVERDUE, CHARGE THE REPLACEMENT_COST + # 4) SUBTRACT ALL PAYMENTS MADE BEFORE THE DATE SPECIFIED + + DECLARE v_rentfees DECIMAL(5,2); #FEES PAID TO RENT THE VIDEOS INITIALLY + DECLARE v_overfees INTEGER; #LATE FEES FOR PRIOR RENTALS + DECLARE v_payments DECIMAL(5,2); #SUM OF PAYMENTS MADE PREVIOUSLY + + SELECT IFNULL(SUM(film.rental_rate),0) INTO v_rentfees + FROM film, inventory, rental + WHERE film.film_id = inventory.film_id + AND inventory.inventory_id = rental.inventory_id + AND rental.rental_date <= p_effective_date + AND rental.customer_id = p_customer_id; + + SELECT IFNULL(SUM(IF((TO_DAYS(rental.return_date) - TO_DAYS(rental.rental_date)) > film.rental_duration, + ((TO_DAYS(rental.return_date) - TO_DAYS(rental.rental_date)) - film.rental_duration),0)),0) INTO v_overfees + FROM rental, inventory, film + WHERE film.film_id = inventory.film_id + AND inventory.inventory_id = rental.inventory_id + AND rental.rental_date <= p_effective_date + AND rental.customer_id = p_customer_id; + + + SELECT IFNULL(SUM(payment.amount),0) INTO v_payments + FROM payment + + WHERE payment.payment_date <= p_effective_date + AND payment.customer_id = p_customer_id; + + RETURN v_rentfees + v_overfees - v_payments; +END $$ + +DELIMITER ; + +DELIMITER $$ + +CREATE DEFINER = 'sakila' PROCEDURE film_in_stock(IN p_film_id INT, IN p_store_id INT, OUT p_film_count INT) +READS SQL DATA +BEGIN + SELECT inventory_id + FROM inventory + WHERE film_id = p_film_id + AND store_id = p_store_id + AND inventory_in_stock(inventory_id); + + SELECT FOUND_ROWS() INTO p_film_count; +END $$ + +DELIMITER ; + +DELIMITER $$ + +CREATE DEFINER = 'sakila' PROCEDURE film_not_in_stock(IN p_film_id INT, IN p_store_id INT, OUT p_film_count INT) +READS SQL DATA +BEGIN + SELECT inventory_id + FROM inventory + WHERE film_id = p_film_id + AND 
store_id = p_store_id + AND NOT inventory_in_stock(inventory_id); + + SELECT FOUND_ROWS() INTO p_film_count; +END $$ + +DELIMITER ; + +DELIMITER $$ + +CREATE DEFINER = 'sakila' FUNCTION inventory_held_by_customer(p_inventory_id INT) RETURNS INT +READS SQL DATA +BEGIN + DECLARE v_customer_id INT; + DECLARE EXIT HANDLER FOR NOT FOUND RETURN NULL; + + SELECT customer_id INTO v_customer_id + FROM rental + WHERE return_date IS NULL + AND inventory_id = p_inventory_id; + + RETURN v_customer_id; +END $$ + +DELIMITER ; + +DELIMITER $$ + +CREATE DEFINER = 'sakila' FUNCTION inventory_in_stock(p_inventory_id INT) RETURNS BOOLEAN +READS SQL DATA +BEGIN + DECLARE v_rentals INT; + DECLARE v_out INT; + + #AN ITEM IS IN-STOCK IF THERE ARE EITHER NO ROWS IN THE rental TABLE + #FOR THE ITEM OR ALL ROWS HAVE return_date POPULATED + + SELECT COUNT(*) INTO v_rentals + FROM rental + WHERE inventory_id = p_inventory_id; + + IF v_rentals = 0 THEN + RETURN TRUE; + END IF; + + SELECT COUNT(rental_id) INTO v_out + FROM inventory LEFT JOIN rental USING(inventory_id) + WHERE inventory.inventory_id = p_inventory_id + AND rental.return_date IS NULL; + + IF v_out > 0 THEN + RETURN FALSE; + ELSE + RETURN TRUE; + END IF; +END $$ + +DELIMITER ; + +SET SQL_MODE=@OLD_SQL_MODE; +SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS; +SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS; diff --git a/test/fixtures/mariadb/01-sakila-data.sql.gz b/test/fixtures/mariadb/01-sakila-data.sql.gz new file mode 100644 index 0000000000000000000000000000000000000000..8ddd9d9d39c15f5d747baae3a0bd19e789dfb88e Binary files /dev/null and b/test/fixtures/mariadb/01-sakila-data.sql.gz differ diff --git a/test/fixtures/mariadb/02-sakila-analyze.sql b/test/fixtures/mariadb/02-sakila-analyze.sql new file mode 100644 index 0000000000000000000000000000000000000000..cbc35fcfbd222bd01676e5b9ccd07f155a71c81d --- /dev/null +++ b/test/fixtures/mariadb/02-sakila-analyze.sql @@ -0,0 +1,32 @@ +-- This scripts workaround bad auto_increment in MySQL table 
InnoDB status. +-- +-- When inserting data using root user and socket, the mysqld service will +-- compute a wrong auto_increment value upon start. +-- +-- This script refreshes statistics at the end of the initdb process. InnoDB cache +-- lifetime must be long enough so that mysqld does not recompute statistics on +-- startup. +-- +USE sakila; + +-- Reset table statistics to have an effective auto_increment value. +ANALYZE TABLE actor; +ANALYZE TABLE address; +ANALYZE TABLE category; +ANALYZE TABLE city; +ANALYZE TABLE country; +ANALYZE TABLE customer; +ANALYZE TABLE film; +ANALYZE TABLE film_actor; +ANALYZE TABLE film_category; +-- Skip MyISAM table! +-- ANALYZE TABLE film_text; +ANALYZE TABLE inventory; +ANALYZE TABLE language; +ANALYZE TABLE payment; +ANALYZE TABLE rental; +ANALYZE TABLE staff; +ANALYZE TABLE store; + +-- Trigger analyze and show values in docker entrypoint logs. +SHOW TABLE STATUS;