feat: add full Zonemaster stack with Docker and Spanish UI
- Clone all 5 Zonemaster component repos (LDNS, Engine, CLI, Backend, GUI)
- Dockerfile.backend: 8-stage multi-stage build LDNS→Engine→CLI→Backend
- Dockerfile.gui: Astro static build served via nginx
- docker-compose.yml: backend (internal) + frontend (port 5353)
- nginx.conf: root redirects to /es/, /api/ proxied to backend
- zonemaster-gui/config.ts: defaultLanguage set to 'es' (Spanish)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2
zonemaster-backend/share/patch/README.txt
Normal file
2
zonemaster-backend/share/patch/README.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Find instructions on patching (upgrading) the Zonemaster database
|
||||
on https://github.com/zonemaster/zonemaster/blob/master/docs/public/upgrading/backend.md
|
||||
@@ -0,0 +1,330 @@
|
||||
use strict;
use warnings;

use List::MoreUtils qw(zip_unflatten);
use JSON::PP;
use Try::Tiny;
use File::Temp qw(tempfile);
use Encode qw(find_encoding);

use Zonemaster::Backend::Config;
use Zonemaster::Engine;

# The backend configuration decides which database engine gets migrated.
my $config = Zonemaster::Backend::Config->load_config();

# Map the lowercased name of every Zonemaster test module back to its
# canonical (mixed-case) spelling, e.g. "dnssec" => "DNSSEC".
my %module_mapping = map { lc $_ => $_ } Zonemaster::Engine->modules;

# Dispatch table: one migration routine per supported database engine.
my %patch = (
    mysql      => \&patch_db_mysql,
    postgresql => \&patch_db_postgresql,
    sqlite     => \&patch_db_sqlite,
);

my $db_engine = $config->DB_engine;
print "Configured database engine: $db_engine\n";

# Case-sensitive whitelist of engine names; anything else is a config error.
if ( $db_engine =~ /^(MySQL|PostgreSQL|SQLite)$/ ) {
    print( "Starting database migration\n" );
    $patch{ lc $db_engine }();
    print( "\nMigration done\n" );
}
else {
    die "Unknown database engine configured: $db_engine\n";
}
|
||||
|
||||
# Move log entries from the JSON blob in test_results.results into
# individual rows of the new result_entries table, working through jobs
# in chunks of $row_count.
#
# depending on the resources available to select all data in database
# update $row_count to your needs
sub _update_data_result_entries {
    my ( $dbh, $row_count ) = @_;

    my $json = JSON::PP->new->allow_blessed->convert_blessed->canonical;

    # update only jobs with results
    my ( $row_total ) = $dbh->selectrow_array( 'SELECT count(*) FROM test_results WHERE results IS NOT NULL' );
    print "Will update $row_total rows\n";

    my %levels = Zonemaster::Engine::Logger::Entry->levels();

    my $row_done = 0;
    while ( $row_done < $row_total ) {
        print "Progress update: $row_done / $row_total\n";
        my $row_updated = 0;

        # NOTE(review): rows migrated below get results set to NULL and thus
        # drop out of this WHERE clause on the next chunk, so combining that
        # with a growing OFFSET can skip still-unmigrated rows — confirm the
        # pagination is intentional for the target DBMS/transaction mode.
        my $sth1 = $dbh->prepare( 'SELECT hash_id, results FROM test_results WHERE results IS NOT NULL ORDER BY id ASC LIMIT ? OFFSET ?' );
        $sth1->execute( $row_count, $row_done );
        while ( my $row = $sth1->fetchrow_arrayref ) {
            my ( $hash_id, $results ) = @$row;

            next unless $results;

            my @records;
            my $entries = $json->decode( $results );

            foreach my $m ( @$entries ) {
                # Restore the canonical module spelling; fall back to simple
                # capitalization for modules unknown to this Engine version.
                my $module = $module_mapping{ lc $m->{module} } // ucfirst lc $m->{module};
                my $testcase =
                    ( !defined $m->{testcase} or $m->{testcase} eq 'UNSPECIFIED' )
                    ? 'Unspecified'
                    : $m->{testcase} =~ s/[a-z_]*/$module/ir;

                # Delegation01 messages changed format: merge the separate
                # nsname/IP lists into a single "name/ip;name/ip;..." list.
                if ($testcase eq 'Delegation01' and $m->{tag} =~ /^(NOT_)?ENOUGH_IPV[46]_NS_(CHILD|DEL)$/) {
                    my @ips = split( /;/, delete $m->{args}{ns_ip_list} );
                    my @names = split( /;/, delete $m->{args}{nsname_list} );
                    my @ns_list = map { join( '/', @$_ ) } zip_unflatten(@names, @ips);
                    $m->{args}{ns_list} = join( ';', @ns_list );
                }

                my $r = [
                    $hash_id,
                    $levels{ $m->{level} },
                    $module,
                    $testcase,
                    $m->{tag},
                    $m->{timestamp},
                    $json->encode( $m->{args} // {} ),
                ];

                push @records, $r;
            }

            # Guard against jobs whose results decode to an empty list: an
            # empty @records would otherwise generate the invalid statement
            # "INSERT ... VALUES " with no value tuples and abort the chunk.
            if ( @records ) {
                my $query_values = join ", ", ("(?, ?, ?, ?, ?, ?, ?)") x @records;
                my $query = "INSERT INTO result_entries (hash_id, level, module, testcase, tag, timestamp, args) VALUES $query_values";
                my $sth = $dbh->prepare( $query );
                # Keep the statement handle intact instead of overwriting it
                # with execute()'s return value.
                $sth->execute( map { @$_ } @records );
            }

            $row_updated += $dbh->do( "UPDATE test_results SET results = NULL WHERE hash_id = ?", undef, $hash_id );
        }

        # increase by min(row_updated, row_count)
        $row_done += ( $row_updated < $row_count ) ? $row_updated : $row_count;
    }
    print "Progress update: $row_done / $row_total\n";
}
|
||||
|
||||
# Re-normalize the domain, params and fingerprint of every stored test,
# reporting progress as an integer percentage.
sub _update_data_normalize_domains {
    my ( $db ) = @_;

    # Total row count is only needed for progress reporting.
    my ( $row_total ) = $db->dbh->selectrow_array( 'SELECT count(*) FROM test_results' );
    print "Will update $row_total rows\n";

    my $sth1 = $db->dbh->prepare( 'SELECT hash_id, params FROM test_results' );
    $sth1->execute;

    my $row_done = 0;
    my $progress = 0;

    while ( my $row = $sth1->fetchrow_hashref ) {
        my $hash_id = $row->{hash_id};

        eval {
            my $raw_params = decode_json( $row->{params} );
            my $domain     = $raw_params->{domain};

            # This has never been cleaned
            delete $raw_params->{user_ip};

            my $params      = $db->encode_params( $raw_params );
            my $fingerprint = $db->generate_fingerprint( $raw_params );

            $domain = Zonemaster::Backend::DB::_normalize_domain( $domain );

            $db->dbh->do( 'UPDATE test_results SET domain = ?, params = ?, fingerprint = ? where hash_id = ?',
                undef, $domain, $params, $fingerprint, $hash_id );
        };
        # A single bad record must not abort the whole migration.
        if ($@) {
            warn "Caught error while updating record with hash id $hash_id, ignoring: $@\n";
        }

        # Print a progress line only when the integer percentage changes.
        $row_done += 1;
        my $new_progress = int(($row_done / $row_total) * 100);
        if ( $new_progress != $progress ) {
            $progress = $new_progress;
            print("$progress%\n");
        }
    }
}
|
||||
|
||||
# Run the full MySQL migration inside a single transaction.
sub patch_db_mysql {
    use Zonemaster::Backend::DB::MySQL;

    my $db  = Zonemaster::Backend::DB::MySQL->from_config( $config );
    my $dbh = $db->dbh;

    # Single transaction: either the whole migration lands or none of it.
    $dbh->{AutoCommit} = 0;

    try {
        $db->create_schema();

        print( "\n-> (1/2) Populating new result_entries table\n" );
        _update_data_result_entries( $dbh, 50000 );

        print( "\n-> (2/2) Normalizing domain names\n" );
        _update_data_normalize_domains( $db );

        $dbh->commit();
    } catch {
        print( "\nCould not upgrade database: " . $_ );

        # Guard the rollback (consistent with the other patch scripts): a
        # dead handle would otherwise turn the rollback itself into an
        # uncaught exception that masks the original error.
        eval { $dbh->rollback() };
    };
}
|
||||
|
||||
# Stream every decoded log entry out of test_results.results and into the
# result_entries table, using a server-side cursor and COPY FROM STDIN so
# the whole (potentially enormous) result set is never materialized at once.
sub _patch_db_postgresql_step1 {
    my ( $dbh, $chunk_size ) = @_;
    $chunk_size //= 100_000;

    # This is used later for backslash-escaping data supplied to COPY … FROM
    # STDIN commands.
    my %conv = ( 8 => '\b', 9 => '\t', 10 => '\n', 11 => '\v', 12 => '\f', 13 => '\r', 92 => '\\\\' );

    my $utf8 = find_encoding('utf8');

    # Why a cursor instead of a plain SELECT statement? Because DBD::Pg does
    # not use server-side cursors itself when reading the result of a SELECT
    # query.
    #
    # And why is that a problem? That’s because the DBMS will try to compute
    # the entire result set before handing it to the client. With large
    # Zonemaster setups with years of history and millions of tests, this
    # SELECT statement will generate hundreds of millions of rows. So without
    # the appropriate precautions, a plain SELECT query like this one will
    # definitely take out the machine it is running on!
    print("Starting up\n");
    $dbh->do(q[
        DECLARE curs NO SCROLL CURSOR FOR
        SELECT
            test_results.hash_id,
            log_level.value AS level,
            CASE res.module
                WHEN 'DNSSEC' THEN res.module
                ELSE initcap(res.module)
            END AS module,
            CASE
                WHEN res.testcase IS NULL THEN ''
                WHEN res.testcase LIKE 'DNSSEC%' THEN res.testcase
                ELSE initcap(res.testcase)
            END AS testcase,
            res.tag AS tag,
            res.timestamp AS timestamp,
            COALESCE(migrated_args.args, '{}') AS args
        FROM test_results,
            json_to_recordset(results)
                AS res(module TEXT, testcase TEXT, tag TEXT, level TEXT, timestamp REAL, args JSONB)
            LEFT JOIN log_level ON (res.level = log_level.level)
            LEFT JOIN LATERAL (
                SELECT CASE WHEN res.testcase = 'DELEGATION01'
                        AND res.tag ~ '^(NOT_)?ENOUGH_IPV[46]_NS_(CHILD|DEL)$'
                        AND (NOT res.args ? 'ns_list')
                    THEN (
                        SELECT res.args
                            - ARRAY['ns_ip_list', 'nsname_list']
                            || jsonb_build_object('ns_list', string_agg(name || '/' || ip, ';'))
                        FROM unnest(
                            string_to_array(res.args->>'ns_ip_list', ';'),
                            string_to_array(res.args->>'nsname_list', ';'))
                            AS unnest(ip, name))
                    ELSE res.args
                END) AS migrated_args(args) ON TRUE]);

    # I’ve tried to avoid hardcoding numbers but FETCH statements somehow
    # don’t like being parameterized with placeholders. This will have to do.
    my $fetch_sth = $dbh->prepare( sprintf( q[FETCH FORWARD %d FROM curs], $chunk_size ) );
    my $row_inserted = 0;

    while ( 1 ) {
        $fetch_sth->execute();
        my $fetched = $fetch_sth->rows();
        last if $fetched <= 0;

        # Keep a copy of everything sent to COPY so it can be dumped to a
        # temp file for inspection if the batch fails.
        my @copy_buffer = ();

        print("Progress update: ${row_inserted} rows inserted\n");
        $row_inserted += $fetched;

        $dbh->do(q[COPY result_entries FROM STDIN]);
        while ( my $row = $fetch_sth->fetchrow_arrayref ) {
            my @columns = map {
                if ( defined $_ ) {
                    # Replaces invalid UTF-8 sequences with U+FFFD and escapes
                    # characters as required by PostgreSQL’s text COPY data
                    # format.
                    $utf8->encode( $utf8->decode($_) =~ s/[\x08-\x0D\\]/$conv{ord $&}/aegr );
                }
                else {
                    '\N';
                }
            } @$row;
            my $line = join( "\t", @columns ) . "\n";
            push @copy_buffer, $line;
            $dbh->pg_putcopydata( $line );
        }

        try {
            $dbh->pg_putcopyend();
        }
        catch {
            print("An error occurred while trying to copy some data.\n");
            my ( $fh, $filename ) = tempfile();
            print $fh @copy_buffer;
            close $fh;
            print("The data supplied to COPY causing the failure has been ",
                "stored in $filename for inspection\n");
            die $_;
        }
    }
    $dbh->do(q[CLOSE curs]);
    print("Done inserting ${row_inserted} rows\n");
}
|
||||
|
||||
# Run the full PostgreSQL migration inside a single transaction.
sub patch_db_postgresql {
    use Zonemaster::Backend::DB::PostgreSQL;

    my $db  = Zonemaster::Backend::DB::PostgreSQL->from_config( $config );
    my $dbh = $db->dbh;

    # Single transaction: either the whole migration lands or none of it.
    $dbh->{AutoCommit} = 0;

    try {
        $db->create_schema();

        # Make sure the planner knows that log_level is a small table
        # so it can optimize step 1 appropriately
        $dbh->do(q[ANALYZE log_level]);

        print( "\n-> (1/2) Populating new result_entries table\n" );
        _patch_db_postgresql_step1( $dbh );

        # Step 1 migrated everything in SQL, so the source blobs can be
        # dropped wholesale here instead of row by row.
        $dbh->do(
            'UPDATE test_results SET results = NULL WHERE results IS NOT NULL'
        );

        print( "\n-> (2/2) Normalizing domain names\n" );
        _update_data_normalize_domains( $db );

        $dbh->commit();
    } catch {
        print( "\nCould not upgrade database: " . $_ );

        # Guard the rollback (consistent with the other patch scripts): a
        # dead handle would otherwise turn the rollback itself into an
        # uncaught exception that masks the original error.
        eval { $dbh->rollback() };
    };
}
|
||||
|
||||
# Run the full SQLite migration inside a single transaction.
sub patch_db_sqlite {
    use Zonemaster::Backend::DB::SQLite;

    my $db  = Zonemaster::Backend::DB::SQLite->from_config( $config );
    my $dbh = $db->dbh;

    # Single transaction: either the whole migration lands or none of it.
    $dbh->{AutoCommit} = 0;

    try {
        $db->create_schema();

        print( "\n-> (1/2) Populating new result_entries table\n" );
        # SQLite gets a much smaller chunk size than MySQL (50000).
        _update_data_result_entries( $dbh, 142 );

        print( "\n-> (2/2) Normalizing domain names\n" );
        _update_data_normalize_domains( $db );

        $dbh->commit();
    } catch {
        print( "\nError while upgrading database: " . $_ );

        # Guard the rollback (consistent with the other patch scripts): a
        # dead handle would otherwise turn the rollback itself into an
        # uncaught exception that masks the original error.
        eval { $dbh->rollback() };
    };
}
|
||||
@@ -0,0 +1,32 @@
|
||||
use strict;
use warnings;

use Zonemaster::Backend::Config;
use Zonemaster::Engine;

my $config = Zonemaster::Backend::Config->load_config();

my $db_engine = $config->DB_engine;
print "Configured database engine: $db_engine\n";

# Only the three supported engines are accepted; the SQL below is portable
# across all of them.
if ( $db_engine =~ /^(MySQL|PostgreSQL|SQLite)$/ ) {
    print( "Starting database migration\n" );

    _update_result_entries( $config->new_DB()->dbh() );

    print( "\nMigration done\n" );
}
else {
    die "Unknown database engine configured: $db_engine\n";
}


# Rewrite the module name stored by older Backend test agents to the
# canonical "Backend" spelling.
sub _update_result_entries {
    my ( $dbh ) = @_;

    $dbh->do(<<SQL) or die 'Migration failed';
    UPDATE result_entries
    SET module = 'Backend'
    WHERE upper(module) = 'BACKEND_TEST_AGENT';
SQL
}
|
||||
@@ -0,0 +1,253 @@
|
||||
use strict;
use warnings;

use Try::Tiny;

use Zonemaster::Backend::Config;

my $config = Zonemaster::Backend::Config->load_config();

# Dispatch table: one migration routine per supported database engine.
my %patch = (
    mysql      => \&patch_db_mysql,
    postgresql => \&patch_db_postgresql,
    sqlite     => \&patch_db_sqlite,
);

my $db_engine = $config->DB_engine;

# Case-sensitive whitelist of engine names; anything else is a config error.
if ( $db_engine =~ /^(MySQL|PostgreSQL|SQLite)$/ ) {
    $patch{ lc $db_engine }();
}
else {
    die "Unknown database engine configured: $db_engine\n";
}
|
||||
|
||||
# MySQL migration: constraints, column renames/retypes, then an in-transaction
# normalization of the "domain" column.
sub patch_db_mysql {
    use Zonemaster::Backend::DB::MySQL;

    my $db  = Zonemaster::Backend::DB::MySQL->from_config( $config );
    my $dbh = $db->dbh;

    # add table constraints
    $dbh->do( 'ALTER TABLE users ADD CONSTRAINT UNIQUE (username)' );
    $dbh->do( 'ALTER TABLE test_results ADD CONSTRAINT UNIQUE (hash_id)' );

    # update columns names, data type and default value
    # (MySQL DDL autocommits, hence these run before the transaction below)
    $dbh->do( $_ ) for (
        'ALTER TABLE test_results MODIFY COLUMN id BIGINT AUTO_INCREMENT',
        'ALTER TABLE test_results CHANGE COLUMN creation_time created_at DATETIME NOT NULL',
        'ALTER TABLE test_results CHANGE COLUMN test_start_time started_at DATETIME DEFAULT NULL',
        'ALTER TABLE test_results CHANGE COLUMN test_end_time ended_at DATETIME DEFAULT NULL',
        'ALTER TABLE batch_jobs CHANGE COLUMN creation_time created_at DATETIME NOT NULL',
    );

    $dbh->{AutoCommit} = 0;

    try {
        # normalize "domain" column: lowercase any domain containing
        # uppercase bytes …
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = LOWER(domain)
                WHERE CAST(domain AS BINARY) RLIKE '[A-Z]'
            ]
        );
        # … collapse dot-only names to the root …
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = '.'
                WHERE domain = '..' OR domain = '...' OR domain = '....'
            ]
        );
        # … and strip the trailing dot from everything but the root.
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = TRIM( TRAILING '.' FROM domain )
                WHERE domain != '.' AND domain LIKE '%.'
            ]
        );

        $dbh->commit();
    } catch {
        print( "Could not upgrade database: " . $_ );

        # The rollback itself may fail on a dead handle; do not let it mask
        # the original error.
        eval { $dbh->rollback() };
    };
}
|
||||
|
||||
# PostgreSQL migration: retype the id sequence, rename columns, add unique
# constraints and normalize the "domain" column — all in one transaction.
sub patch_db_postgresql {
    use Zonemaster::Backend::DB::PostgreSQL;

    my $db  = Zonemaster::Backend::DB::PostgreSQL->from_config( $config );
    my $dbh = $db->dbh;

    $dbh->{AutoCommit} = 0;

    try {
        # update sequence data type to BIGINT
        $dbh->do( 'ALTER SEQUENCE test_results_id_seq AS BIGINT' );
        $dbh->do( 'ALTER TABLE test_results ALTER COLUMN id SET DATA TYPE BIGINT' );

        # remove default value for "creation_time"
        $dbh->do( 'ALTER TABLE test_results ALTER COLUMN creation_time DROP DEFAULT' );
        $dbh->do( 'ALTER TABLE batch_jobs ALTER COLUMN creation_time DROP DEFAULT' );

        # rename columns
        $dbh->do( $_ ) for (
            'ALTER TABLE test_results RENAME COLUMN creation_time TO created_at',
            'ALTER TABLE test_results RENAME COLUMN test_start_time TO started_at',
            'ALTER TABLE test_results RENAME COLUMN test_end_time TO ended_at',
            'ALTER TABLE batch_jobs RENAME COLUMN creation_time TO created_at',
        );

        # add table constraints
        $dbh->do( 'ALTER TABLE test_results ADD UNIQUE (hash_id)' );
        $dbh->do( 'ALTER TABLE users ADD UNIQUE (username)' );

        # normalize "domain" column: lowercase …
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = LOWER(domain)
                WHERE domain != LOWER(domain)
            ]
        );
        # … collapse dot-only names to the root …
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = '.'
                WHERE domain = '..' OR domain = '...' OR domain = '....'
            ]
        );
        # … and strip the trailing dot from everything but the root.
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = RTRIM(domain, '.')
                WHERE domain != '.' AND domain LIKE '%.'
            ]
        );

        $dbh->commit();
    } catch {
        print( "Could not upgrade database: " . $_ );

        # The rollback itself may fail on a dead handle; do not let it mask
        # the original error.
        eval { $dbh->rollback() };
    };
}
|
||||
|
||||
# SQLite migration. SQLite cannot change a column default in place, so the
# affected tables are rebuilt from scratch.
sub patch_db_sqlite {
    use Zonemaster::Backend::DB::SQLite;

    my $db  = Zonemaster::Backend::DB::SQLite->from_config( $config );
    my $dbh = $db->dbh;

    $dbh->{AutoCommit} = 0;

    # since we change the default value for a column, the whole table needs to
    # be recreated
    # 1. rename the table to "<table>_old"
    # 2. recreate a clean table schema
    # 3. populate it with the values from "<table>_old"
    # 4. remove "<table>_old" and indexes
    # 5. recreate the indexes
    try {
        $dbh->do( $_ ) for (
            'ALTER TABLE test_results RENAME TO test_results_old',
            'ALTER TABLE batch_jobs RENAME TO batch_jobs_old',
            'ALTER TABLE users RENAME TO users_old',
        );

        # create the tables
        $db->create_schema();

        # populate the tables (columns renamed: creation_time -> created_at,
        # test_start_time -> started_at, test_end_time -> ended_at; domains
        # lowercased on the way in)
        $dbh->do(
            q[
                INSERT INTO test_results
                (
                    id,
                    hash_id,
                    domain,
                    batch_id,
                    created_at,
                    started_at,
                    ended_at,
                    priority,
                    queue,
                    progress,
                    fingerprint,
                    params,
                    results,
                    undelegated
                )
                SELECT
                    id,
                    hash_id,
                    lower(domain),
                    batch_id,
                    creation_time,
                    test_start_time,
                    test_end_time,
                    priority,
                    queue,
                    progress,
                    fingerprint,
                    params,
                    results,
                    undelegated
                FROM test_results_old
            ]
        );
        # Collapse dot-only names to the root …
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = '.'
                WHERE domain = '..' OR domain = '...' OR domain = '....'
            ]
        );
        # … and strip the trailing dot from everything but the root.
        $dbh->do(
            q[
                UPDATE test_results
                SET domain = RTRIM(domain, '.')
                WHERE domain != '.' AND domain LIKE '%.'
            ]
        );

        $dbh->do('
            INSERT INTO batch_jobs
            (
                id,
                username,
                created_at
            )
            SELECT
                id,
                username,
                creation_time
            FROM batch_jobs_old
        ');

        $dbh->do('
            INSERT INTO users
            (
                id,
                username,
                api_key
            )
            SELECT
                id,
                username,
                api_key
            FROM users_old
        ');

        # delete old tables
        $dbh->do('DROP TABLE test_results_old');
        $dbh->do('DROP TABLE batch_jobs_old');
        $dbh->do('DROP TABLE users_old');

        # recreate indexes
        $db->create_schema();

        $dbh->commit();
    } catch {
        print( "Error while upgrading database: " . $_ );

        eval { $dbh->rollback() };
    };
}
|
||||
@@ -0,0 +1,40 @@
|
||||
use strict;
use warnings;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::MySQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'MySQL' ) {
    die "The configuration file does not contain the MySQL backend";
}
my $dbh = Zonemaster::Backend::DB::MySQL->from_config( $config )->dbh;

# Give every existing test result a random 16-char hash_id, then install a
# trigger so new rows get one automatically.
sub patch_db {

    ####################################################################
    # TEST RESULTS
    ####################################################################
    # Add the column as nullable first so the backfill can run.
    $dbh->do( 'ALTER TABLE test_results ADD COLUMN hash_id VARCHAR(16) NULL' );

    $dbh->do( 'UPDATE test_results SET hash_id = (SELECT SUBSTRING(MD5(CONCAT(RAND(), UUID())) from 1 for 16))' );

    # Now that every row has a value, tighten the column to NOT NULL.
    $dbh->do( 'ALTER TABLE test_results MODIFY hash_id VARCHAR(16) DEFAULT NULL NOT NULL' );

    $dbh->do(
        q{CREATE TRIGGER before_insert_test_results
            BEFORE INSERT ON test_results
            FOR EACH ROW
            BEGIN
                IF new.hash_id IS NULL OR new.hash_id=''
                THEN
                    SET new.hash_id = SUBSTRING(MD5(CONCAT(RAND(), UUID())) from 1 for 16);
                END IF;
            END;
        }
    );
}

patch_db();
|
||||
@@ -0,0 +1,22 @@
|
||||
use strict;
use warnings;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::MySQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'MySQL' ) {
    die "The configuration file does not contain the MySQL backend";
}
my $dbh = Zonemaster::Backend::DB::MySQL->from_config( $config )->dbh;

# Add the retry counter used by the test agent when re-queueing jobs.
sub patch_db {
    ####################################################################
    # TEST RESULTS
    ####################################################################
    $dbh->do( 'ALTER TABLE test_results ADD COLUMN nb_retries INTEGER NOT NULL DEFAULT 0' );
}

patch_db();
|
||||
@@ -0,0 +1,22 @@
|
||||
use strict;
use warnings;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::MySQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'MySQL' ) {
    die "The configuration file does not contain the MySQL backend";
}
my $dbh = Zonemaster::Backend::DB::MySQL->from_config( $config )->dbh;

sub patch_db {
    ############################################################################
    # Convert column "results" to MEDIUMBLOB so that it can hold larger results
    ############################################################################
    $dbh->do( 'ALTER TABLE test_results MODIFY results mediumblob' );
}

patch_db();
|
||||
@@ -0,0 +1,76 @@
|
||||
use strict;
use warnings;
use JSON::PP;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::MySQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'MySQL' ) {
    die "The configuration file does not contain the MySQL backend";
}
my $db = Zonemaster::Backend::DB::MySQL->from_config( $config );
my $dbh = $db->dbh;


# MySQL/MariaDB schema upgrade: drop the hash_id trigger, rename the
# fingerprint column, rebuild its index, backfill "undelegated" and drop
# obsolete columns. Schema steps that may already have been applied are
# wrapped in eval so a partial re-run only warns.
sub patch_db {
    # Remove the trigger
    $dbh->do( 'DROP TRIGGER IF EXISTS before_insert_test_results' );

    # Set the "hash_id" field to NOT NULL
    eval {
        $dbh->do( 'ALTER TABLE test_results MODIFY COLUMN hash_id VARCHAR(16) NOT NULL' );
    };
    print( "Error while changing DB schema: " . $@ ) if ($@);

    # Rename column "params_deterministic_hash" into "fingerprint"
    # Since MariaDB 10.5.2 (2020-03-26) <https://mariadb.com/kb/en/mariadb-1052-release-notes/>
    # ALTER TABLE t1 RENAME COLUMN old_col TO new_col;
    # Before that we need to use CHANGE COLUMN <https://mariadb.com/kb/en/alter-table/#change-column>
    eval {
        $dbh->do('ALTER TABLE test_results CHANGE COLUMN params_deterministic_hash fingerprint CHARACTER VARYING(32)');
    };
    print( "Error while changing DB schema: " . $@ ) if ($@);

    # Update index
    eval {
        # retrieve all indexes by key name
        my $indexes = $dbh->selectall_hashref( 'SHOW INDEXES FROM test_results', 'Key_name' );
        if ( exists($indexes->{test_results__params_deterministic_hash}) ) {
            $dbh->do( "DROP INDEX test_results__params_deterministic_hash ON test_results" );
        }
        $dbh->do( "CREATE INDEX test_results__fingerprint ON test_results (fingerprint)" );
    };
    print( "Error while updating the index: " . $@ ) if ($@);

    # Update the "undelegated" column: a test is undelegated when its params
    # carry any non-empty ds_info or nameservers values.
    my $params_sth = $dbh->prepare('SELECT id, params from test_results', undef);
    $params_sth->execute;
    while ( my $record = $params_sth->fetchrow_hashref ) {
        my $id = $record->{id};
        my $raw_params = decode_json($record->{params});
        my $ds_info_values = scalar grep !/^$/, map { values %$_ } @{$raw_params->{ds_info}};
        my $nameservers_values = scalar grep !/^$/, map { values %$_ } @{$raw_params->{nameservers}};
        my $undelegated = $ds_info_values > 0 || $nameservers_values > 0 || 0;

        $dbh->do('UPDATE test_results SET undelegated = ? where id = ?', undef, $undelegated, $id);
    }


    # remove the "user_info" column from the "users" table
    # the IF EXISTS clause is available with MariaDB but not MySQL
    eval {
        $dbh->do( "ALTER TABLE users DROP COLUMN user_info" );
    };
    print( "Error while dropping the column: " . $@ ) if ($@);

    # remove the "nb_retries" column from the "test_results" table
    eval {
        $dbh->do( "ALTER TABLE test_results DROP COLUMN nb_retries" );
    };
    print( "Error while dropping the column: " . $@ ) if ($@);
}

patch_db();
|
||||
@@ -0,0 +1,23 @@
|
||||
use strict;
use warnings;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::PostgreSQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'PostgreSQL' ) {
    die "The configuration file does not contain the PostgreSQL backend";
}
my $dbh = Zonemaster::Backend::DB::PostgreSQL->from_config( $config )->dbh;

# Add a hash_id column whose default is a random 16-char md5 prefix, so both
# existing and future rows get an identifier in one statement.
sub patch_db {

    ####################################################################
    # TEST RESULTS
    ####################################################################
    $dbh->do( 'ALTER TABLE test_results ADD COLUMN hash_id VARCHAR(16) DEFAULT substring(md5(random()::text || clock_timestamp()::text) from 1 for 16) NOT NULL' );
}

patch_db();
|
||||
@@ -0,0 +1,23 @@
|
||||
use strict;
use warnings;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::PostgreSQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'PostgreSQL' ) {
    die "The configuration file does not contain the PostgreSQL backend";
}
my $dbh = Zonemaster::Backend::DB::PostgreSQL->from_config( $config )->dbh;

# Add the retry counter used by the test agent when re-queueing jobs.
sub patch_db {

    ####################################################################
    # TEST RESULTS
    ####################################################################
    $dbh->do( 'ALTER TABLE test_results ADD COLUMN nb_retries INTEGER NOT NULL DEFAULT 0' );
}

patch_db();
|
||||
@@ -0,0 +1,109 @@
|
||||
use strict;
use warnings;
use JSON::PP;
use Encode;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::PostgreSQL;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'PostgreSQL' ) {
    die "The configuration file does not contain the PostgreSQL backend";
}
my $db = Zonemaster::Backend::DB::PostgreSQL->from_config( $config );
my $dbh = $db->dbh;


# PostgreSQL schema upgrade: rename the fingerprint column, rebuild indexes,
# add and backfill "domain"/"undelegated", split user_info into username and
# api_key, and drop obsolete columns.
sub patch_db {
    # Helper for schema steps that may already have been applied on a partial
    # re-run: run the step, warn with the given message instead of dying.
    my $attempt = sub {
        my ( $message, $step ) = @_;
        eval { $step->() };
        print( $message . $@ ) if ($@);
    };

    # Drop default value for the "hash_id" field
    $dbh->do( 'ALTER TABLE test_results ALTER COLUMN hash_id DROP DEFAULT' );

    # Rename column "params_deterministic_hash" into "fingerprint"
    $attempt->( "Error while changing DB schema: ", sub {
        $dbh->do( 'ALTER TABLE test_results RENAME COLUMN params_deterministic_hash TO fingerprint' );
    } );

    # Update index
    $attempt->( "Error while updating the index: ", sub {
        $dbh->do( "DROP INDEX IF EXISTS test_results__params_deterministic_hash" );
        $dbh->do( "CREATE INDEX test_results__fingerprint ON test_results (fingerprint)" );
    } );

    # test_start_time and test_end_time default to NULL
    $attempt->( "Error while changing DB schema: ", sub {
        $dbh->do('ALTER TABLE test_results ALTER COLUMN test_start_time SET DEFAULT NULL');
        $dbh->do('ALTER TABLE test_results ALTER COLUMN test_end_time SET DEFAULT NULL');
    } );


    # Add missing "domain" and "undelegated" columns
    $attempt->( "Error while changing DB schema: ", sub {
        $dbh->do( "ALTER TABLE test_results ADD COLUMN domain VARCHAR(255) NOT NULL DEFAULT ''" );
        $dbh->do( 'ALTER TABLE test_results ADD COLUMN undelegated integer NOT NULL DEFAULT 0' );
    } );

    # Update index
    $attempt->( "Error while updating the index: ", sub {
        $dbh->do( "DROP INDEX IF EXISTS test_results__domain_undelegated" );
        $dbh->do( "CREATE INDEX test_results__domain_undelegated ON test_results (domain, undelegated)" );
    } );

    # New index
    $attempt->( "Error while creating the index: ", sub {
        $dbh->do( 'CREATE INDEX IF NOT EXISTS test_results__progress_priority_id ON test_results (progress, priority DESC, id) WHERE (progress = 0)' );
    } );

    # Update the "domain" column
    $dbh->do( "UPDATE test_results SET domain = (params->>'domain')" );
    # remove default value to "domain" column
    $dbh->do( "ALTER TABLE test_results ALTER COLUMN domain DROP DEFAULT" );

    # Update the "undelegated" column: a test is undelegated when its params
    # carry any non-empty ds_info or nameservers values.
    my $params_sth = $dbh->prepare('SELECT id, params from test_results', undef);
    $params_sth->execute;
    while ( my $record = $params_sth->fetchrow_hashref ) {
        my $id = $record->{id};
        my $raw_params;

        # DBD::Pg may hand back a character string; re-encode before the
        # byte-oriented decode_json.
        if (utf8::is_utf8($record->{params}) ) {
            $raw_params = decode_json( encode_utf8 ( $record->{params} ) );
        } else {
            $raw_params = decode_json( $record->{params} );
        }

        my $ds_info_values = scalar grep !/^$/, map { values %$_ } @{$raw_params->{ds_info}};
        my $nameservers_values = scalar grep !/^$/, map { values %$_ } @{$raw_params->{nameservers}};
        my $undelegated = $ds_info_values > 0 || $nameservers_values > 0 || 0;

        $dbh->do('UPDATE test_results SET undelegated = ? where id = ?', undef, $undelegated, $id);
    }

    # add "username" and "api_key" columns to the "users" table
    $attempt->( "Error while changing DB schema: ", sub {
        $dbh->do( 'ALTER TABLE users ADD COLUMN username VARCHAR(128)' );
        $dbh->do( 'ALTER TABLE users ADD COLUMN api_key VARCHAR(512)' );
    } );

    # update the columns
    $attempt->( "Error while updating the users table: ", sub {
        $dbh->do( "UPDATE users SET username = (user_info->>'username'), api_key = (user_info->>'api_key')" );
    } );

    # remove the "user_info" column from the "users" table
    $dbh->do( "ALTER TABLE users DROP COLUMN IF EXISTS user_info" );

    # remove the "nb_retries" column from the "test_results" table
    $dbh->do( "ALTER TABLE test_results DROP COLUMN IF EXISTS nb_retries" );
}

patch_db();
|
||||
@@ -0,0 +1,95 @@
|
||||
use strict;
use warnings;
use JSON::PP;

use DBI qw(:utils);

use Zonemaster::Backend::Config;
use Zonemaster::Backend::DB::SQLite;

my $config = Zonemaster::Backend::Config->load_config();
if ( $config->DB_engine ne 'SQLite' ) {
    die "The configuration file does not contain the SQLite backend";
}
my $db = Zonemaster::Backend::DB::SQLite->from_config( $config );
my $dbh = $db->dbh;


# SQLite schema upgrade. SQLite cannot alter column defaults or drop columns
# in place, so both affected tables are rebuilt via rename/copy/drop.
sub patch_db {

    # since we change the default value for a column, the whole table needs to
    # be recreated
    # 1. rename the "test_results" table to "test_results_old"
    # 2. create the new "test_results" table
    # 3. populate it with the values from "test_results_old"
    # 4. remove old table and indexes
    # 5. recreate the indexes
    eval {
        $dbh->do('ALTER TABLE test_results RENAME TO test_results_old');

        # create the table
        $db->create_schema();

        # populate it
        # - nb_retries is omitted as we remove this column
        # - params_deterministic_hash is renamed to fingerprint
        $dbh->do('
            INSERT INTO test_results
            SELECT id,
                   hash_id,
                   domain,
                   batch_id,
                   creation_time,
                   test_start_time,
                   test_end_time,
                   priority,
                   queue,
                   progress,
                   params_deterministic_hash,
                   params,
                   results,
                   undelegated
            FROM test_results_old
        ');

        $dbh->do('DROP TABLE test_results_old');

        # recreate indexes
        $db->create_schema();
    };
    print( "Error while updating the 'test_results' table schema: " . $@ ) if ($@);

    # Update the "undelegated" column: a test is undelegated when its params
    # carry any non-empty ds_info or nameservers values.
    my $params_sth = $dbh->prepare('SELECT id, params from test_results', undef);
    $params_sth->execute;
    while ( my $record = $params_sth->fetchrow_hashref ) {
        my $id = $record->{id};
        my $raw_params = decode_json($record->{params});
        my $ds_info_values = scalar grep !/^$/, map { values %$_ } @{$raw_params->{ds_info}};
        my $nameservers_values = scalar grep !/^$/, map { values %$_ } @{$raw_params->{nameservers}};
        my $undelegated = $ds_info_values > 0 || $nameservers_values > 0 || 0;

        $dbh->do('UPDATE test_results SET undelegated = ? where id = ?', undef, $undelegated, $id);
    }


    # in order to properly drop a column, the whole table needs to be recreated
    # 1. rename the "users" table to "users_old"
    # 2. create the new "users" table
    # 3. populate it with the values from "users_old"
    # 4. remove old table
    eval {
        $dbh->do('ALTER TABLE users RENAME TO users_old');

        # create the table
        $db->create_schema();

        # populate it
        $dbh->do('INSERT INTO users SELECT id, username, api_key FROM users_old');

        $dbh->do('DROP TABLE users_old');
    };
    print( "Error while updating the 'users' table schema: " . $@ ) if ($@);
}

patch_db();
|
||||
Reference in New Issue
Block a user