Shantanu's Blog

Database Consultant

March 01, 2013

 

Redshift Tips

# List all tables:
select db_id, id, name, sum(rows) as mysum from stv_tbl_perm where db_id = 100546 group by db_id, id, name order by mysum desc;

# list all running processes:
select pid, query from stv_recents where status = 'Running';

# describe table
select * from PG_TABLE_DEF where tablename='audit_trail';

select * from pg_tables where schemaname = 'public'

# Disk space used:
select sum(used-tossed) as used, sum(capacity) as capacity from stv_partitions

# list all queries failing due to insufficient disk space
select '2000-01-01'::timestamp + (currenttime/1000000.0)* interval '1 second' as currenttime, querytxt from stl_disk_full_diag as a left join stl_query as b on a.query_id = b.query

# Query log
select query, starttime , substring from svl_qlog where substring like '%tbl_name%' order by starttime desc limit 50;

# command history
select * from stl_ddltext where text like '%ox_data_summary_hourly_depot%' limit 10

# last load errors
select starttime, filename, err_reason from stl_load_errors order by starttime desc limit 100

select filename, count(*) as cnt from stl_load_errors group by filename

# create table from another table
select * into newevent from event;

# Check how columns are compressed
ANALYZE COMPRESSION

# ANALYZE and VACUUM
If you insert, update, or delete a significant number of rows in a table, run the ANALYZE and VACUUM commands against the table.
The "analyze compression tbl_name" command produces a report with the suggested column encoding.

# To find and diagnose load errors for table 'event'
create view loadview as
(select distinct tbl, trim(name) as table_name, query, starttime,
trim(filename) as input, line_number, field, err_code,
trim(err_reason) as reason
from stl_load_errors sl, stv_tbl_perm sp
where sl.tbl = sp.id);
select * from loadview where table_name='event';

# Query to find blocks used
select stv_tbl_perm.name, count(*)
from stv_blocklist, stv_tbl_perm
where stv_blocklist.tbl = stv_tbl_perm.id
and stv_blocklist.slice = stv_tbl_perm.slice
group by stv_tbl_perm.name
order by stv_tbl_perm.name;

# query load commits table

select filename, byte_offset, lines_scanned, errors, status, curtime from stl_load_commits
where filename like '%redshiftall%' order by curtime desc limit 10 ;

select  date(curtime) as updated, count(query) from stl_load_commits
where filename like '%redshiftall%' group by updated order by updated desc limit 10 ;

Load tips:
# While loading data you can specify "empty as null", "blanks as null", allow "max error 5", "ignore blank lines", "remove quotes", and "use gzip". Use the keywords: emptyasnull blanksasnull maxerror 5 ignoreblanklines removequotes gzip (see the sample COPY command after this list)
# use NULL AS '\000' to fix the import from specific files
# use BLANKSASNULL in the original COPY statement so that no empty strings are loaded into VARCHAR fields which might ultimately be converted to numeric fields.
# Use the NOLOAD keyword with a COPY command to validate the data in the input files before actually loading the data.
# use COMPUPDATE to enable automatic compression
# FILLRECORD to fill missing columns at the end with blanks or NULLs
# TRIMBLANKS Removes the trailing whitespace characters from a VARCHAR string.
# ESCAPE the backslash character (\) in input data is treated as an escape character. (useful for delimiters and embedded newlines)
# ROUNDEC rounds a value such as 20.259 to 20.26 when it is loaded into a DECIMAL(8,2) column; without it the value is truncated to 20.25
# TRUNCATECOLUMNS Truncates data in columns to the appropriate number of characters so that it fits the column specification.
# IGNOREHEADER to skip the given number of header rows, e.g. IGNOREHEADER 1 to skip the first row
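A sample COPY command combining several of these options (the table name, S3 path, and credentials below are placeholders):

copy tbl_name from 's3://mybucket/data/file.gz'
credentials 'aws_access_key_id=xx;aws_secret_access_key=xx'
gzip removequotes emptyasnull blanksasnull ignoreblanklines maxerror 5 ignoreheader 1;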

_____

If you are using JDBC, try adding the keepalive option to your connect string. E.g.,
jdbc:postgresql://instance.amazonaws.com:8192/database?tcpkeepalive=true
You can have AUTOCOMMIT set in your Workbench client.
_____

To avoid timeout errors while using Workbench on Windows, use the following registry settings:
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\KeepAliveTime 30000
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\KeepAliveInterval 1000
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\TcpMaxDataRetransmission 10
_____

# Consider using DISTKEY and SORTKEY - there can be multiple sort key columns but only one distkey per table (see the example below).
# wlm_query_slot_count - this sets aside more memory for a query, which may avoid operations spilling to disk
# the isolation level for Redshift is SERIALIZABLE
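For example, a hypothetical table with a distribution key and a compound sort key, followed by a session-level increase of the query slot count:

create table events_dist (
  event_id bigint,
  account_id bigint,
  event_time timestamp
)
distkey(account_id)
sortkey(account_id, event_time);

set wlm_query_slot_count to 3;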

_____

// There is no equivalent of "show create table tbl_name"
select from the PG_TABLE_DEF table to gather all the necessary schema information
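// for example, to describe a hypothetical table tbl_name:

select "column", type, encoding, distkey, sortkey
from pg_table_def
where tablename = 'tbl_name';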

// convert to and from unixtime

select extract (epoch from timestamp '2011-08-08 11:11:58');
select TIMESTAMP 'epoch' + starttime * INTERVAL '1 second' as starting from tbl_name;

// Update a joined table:

update abcd set ser_area_code=abcd_update.ser_area_code, preferences=abcd_update.preferences, opstype=abcd_update.opstype,
phone_type=abcd_update.phone_type
from abcd_update where abcd.phone_number = abcd_update.phone_number;

http://docs.aws.amazon.com/redshift/latest/dg/t_updating-inserting-using-staging-tables-.html#concept_upsert
_____

// install postgresql
yum install postgresql postgresql-server
chkconfig postgresql on

// You will now create a file where the redshift password will be stored.
vi  ~/.pgpass
c.us-east-1.redshift.amazonaws.com:5439:mydb:root:Passwd

chmod 0600 ~/.pgpass



// load data to redshift
cat to_psql.txt  | psql -hc.us-east-1.redshift.amazonaws.com  -Uroot -p5439 mydb > to_save.csv

// send the file as an attachment
echo "report file attached. " | mutt -s "result data " -a to_save.csv -- some_address@gmail.com

// mysqldump command that will generate the required statements to be used in redshift

mysqldump db_name tbl_name --where='1=1 limit 10' --compact --no-create-info --skip-quote-names > to_psql.txt

mysqldump -h1.2.3.4 -uroot -ppasswd -P3311 db_name table_names ox_banners ox_campaigns --compact --no-create-info --skip-extended-insert  --where 'campaignid = 66  '
_____

Amazon Redshift data types are different from those of MySQL. For example, long text values can be stored only as a VARCHAR type, up to 65535 bytes.

http://docs.aws.amazon.com/redshift/latest/dg/r_Character_types.html
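For instance, a MySQL TEXT column would typically be declared with the maximum VARCHAR width (the table and column names here are only illustrative):

create table notes_tbl (note varchar(65535));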

Here is a script that will do this conversion automatically.

https://gist.github.com/shantanuo/5115366

_____


If the postgresql client is installed, we can connect to redshift using something like this...

# PGPASSWORD=Fly8946392085 psql -U fsb_user_85_22719249 -h flydata-sandbox-cluster.clroanynhqjo.us-east-1.redshift.amazonaws.com -p 5439 -d flydatasandboxdb
Welcome to psql 8.1.23 (server 8.0.2), the PostgreSQL interactive terminal.

_____

## script that will display 10 rows from each table

#!/bin/sh

# get the list of table names (-t -A prints unaligned rows with no headers or footers)
echo "select name from stv_tbl_perm where db_id = 100546 group by name ;" | psql -t -A -hkalc.us-east-1.redshift.amazonaws.com -Uroot -p5439 mydb > /root/psql.txt 2>> /root/psql_err.txt

for tbl_name in `cat /root/psql.txt`
do
echo "$tbl_name" >> /root/psql_limit.txt 2>> /root/psql_limit_err.txt
echo "select * from $tbl_name limit 10 ; " | psql -hkalc.us-east-1.redshift.amazonaws.com -Uroot -p5439 mydb >> /root/psql_limit.txt 2>> /root/psql_limit_err.txt
echo "=====================================" >> /root/psql_limit.txt

done
_____


The following statement queries the STV_LOCKS table to view all locks in effect for current transactions:

select table_id, last_update, lock_owner, lock_owner_pid, lock_status
from stv_locks;

 table_id |        last_update         | lock_owner | lock_owner_pid |      lock_status
----------+----------------------------+------------+----------------+------------------------
   100295 | 2014-01-06 23:50:56.290917 |      95402 |           7723 | Holding write lock
   100304 | 2014-01-06 23:50:57.408457 |      95402 |           7723 | Holding write lock
   100304 | 2014-01-06 23:50:57.409986 |      95402 |           7723 | Holding  insert lock
(3 rows)

The following statement terminates the session holding the locks:

select pg_terminate_backend(7723);
_____

If you want to take a backup of a system table, e.g. stl_load_commits, periodically, then set up a daily or weekly cron job using this statement:

unload ( $$ select *, 'stl_load_commits_' || replace(left(getdate(), 13), ' ', '_') as tbl from stl_load_commits $$ ) to 's3://163cols/stl_load_commits_/backup/' credentials 'aws_access_key_id=xx;aws_secret_access_key=xx' PARQUET PARTITION BY (tbl) ALLOWOVERWRITE;
