Wednesday, April 15, 2009

Benchmark row-by-row and set based process

Goal
----
Compare the performance of a row-by-row update against a single set-based SQL update.
The benchmark metrics give the Product Manager supporting evidence for why we should move some
row-by-row "real time" processes (which can be near real time) to asynchronous set-based bulk processes.

file://T:/NextAction/benchmark_update_listing.sql

Solution
--------
Generate test data, update some rows, and benchmark with the RunStats package.

Result
------
From the benchmark, you can see:

- Set-based process vs. row-by-row process, commit outside the loop.
The row-by-row process takes about 3 times the elapsed time, 3 times the CPU, and 3 times the logical I/O,
and acquires about 2.6 times as many latches (258% of the set-based run). Latches are a type of lock,
locks are serialization devices, and serialization devices inhibit scalability.

- Set-based process vs. row-by-row process, commit inside the loop.
The row-by-row process takes about 5 times the elapsed time, 5 times the CPU, and 5 times the logical I/O,
and acquires about 10 times as many latches (1,019.26% of the set-based run).

- If we add a blocking row-by-row process from OIMS, introducing more latch requests from different concurrent users,
it will be even worse.
A latch is a serialization device; the more sessions that request the same latch at the same time, the longer the run times -
as the number of concurrent users goes up, so do the observed run times and CPU times
(as sessions spin on the latch trying to get it), because we experience contention.

So,

SQL SET BASED > BULK PROCESSING > SLOW BY SLOW

You did well by turning slow-by-slow into BULK.

You could have done better by turning BULK into a SET OPERATION.
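
For reference, the intermediate "BULK" variant mentioned above would look roughly like the sketch below. It is not part of the benchmark; it reuses the listing_file/listing_wide tables from the Setup section, updates only a few columns for brevity, and uses separate scalar collections to avoid the older FORALL restriction on referencing record fields:

declare
  type t_num_tab is table of number;
  type t_vc_tab  is table of varchar2(4000);
  l_vendor_id  t_num_tab;
  l_listing_id t_num_tab;
  l_price      t_num_tab;
  l_qty        t_num_tab;
  l_hash_code  t_vc_tab;
begin
  -- fetch the change set in one round trip
  select vendor_id, listing_id, price, qty, hash_code
    bulk collect into l_vendor_id, l_listing_id, l_price, l_qty, l_hash_code
    from listing_file;

  -- apply it with a single bulk-bound UPDATE
  forall i in 1 .. l_listing_id.count
    update listing_wide d
       set d.price     = l_price(i),
           d.qty       = l_qty(i),
           d.hash_code = l_hash_code(i)
     where d.vendor_id  = l_vendor_id(i)
       and d.listing_id = l_listing_id(i);

  commit;
end;
/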



Stats
-----
*) commit outside the loop

Run1 ran in 79 hsecs
Run2 ran in 25 hsecs
run 1 ran in 316% of the time

Name                                              Run1          Run2          Diff
STAT...db block changes                          4,555         4,568            13
STAT...db block gets                             2,418         2,444            26
STAT...Elapsed Time                                 80            26           -54
STAT...CPU used by this session                     81            26           -55
LATCH.simulator hash latch                         308           181          -127
STAT...table scan rows gotten                    2,728         2,222          -506
STAT...redo size                             5,841,592     5,842,304           712
LATCH.checkpoint queue latch                     2,223           102        -2,121
STAT...execute count                             2,224             4        -2,220
STAT...sorts (memory)                            2,222             0        -2,222
LATCH.MinActiveScn Latch                         2,428           198        -2,230
LATCH.object queue header operation              2,862           393        -2,469
STAT...undo change vector size               2,837,256     2,839,872         2,616
LATCH.cache buffers lru chain                    4,445           195        -4,250
STAT...session logical reads                     9,292         2,751        -6,541
STAT...consistent gets                           6,874           307        -6,567
LATCH.cache buffers chains                      23,525        12,415       -11,110

Run1 latches total versus runs -- difference and pct
Run1          Run2          Diff        Pct
36,466        14,118       -22,348    258.29%

PL/SQL procedure successfully completed.


*) commit inside the loop

Run1 ran in 129 hsecs
Run2 ran in 24 hsecs
run 1 ran in 537.5% of the time

Name                                              Run1          Run2          Diff
STAT...CPU used by this session                    130            26          -104
STAT...Elapsed Time                                132            25          -107
LATCH.Consistent RBA                             1,260             8        -1,252
STAT...commit cleanouts                          2,224           100        -2,124
LATCH.redo writing                               3,782            25        -3,757
LATCH.cache buffers lru chain                    4,454           195        -4,259
STAT...db block changes                          8,916         4,572        -4,344
LATCH.session allocation                         4,481            52        -4,429
STAT...db block gets                             8,917         2,456        -6,461
STAT...consistent gets                           6,882           408        -6,474
STAT...recursive calls                           6,714            35        -6,679
LATCH.redo allocation                            8,225            29        -8,196
LATCH.In memory undo latch                       8,893            11        -8,882
LATCH.DML lock allocation                        8,891             7        -8,884
LATCH.undo global data                          11,136           129       -11,007
STAT...session logical reads                    15,799         2,864       -12,935
LATCH.enqueue hash chains                       13,351            25       -13,326
LATCH.cache buffers chains                      60,632        12,552       -48,080
STAT...undo change vector size               2,989,432     2,841,148      -148,284
STAT...redo size                             6,391,412     5,844,136      -547,276
STAT...IMU undo allocation size              4,193,320         1,080    -4,192,240

Run1 latches total versus runs -- difference and pct
Run1          Run2          Diff        Pct
145,815        14,306      -131,509  1,019.26%

PL/SQL procedure successfully completed.


Setup
-----

drop table listing_wide purge;
create table listing_wide
(
vendor_id   number(10,0)  Not Null ,
listing_id   number(20,0)  Not Null ,
vendor_listing_id  Varchar(40)  Not Null ,
hash_code  VarChar2(32)  Not Null  ,
qty   number(5,0)  Not Null  ,
price  number(10,2) Not Null  ,
currency  varchar2(3) default 'USD' Not Null,
listingtype  number(4,0) default 1001  Not Null,
book_lang varchar2(3) default ' ' Not Null ,
create_date Date default sysdate Not Null ,
update_date Date default sysdate Not Null ,
abe_cat0 varchar2(100)     ,
abe_cat1  varchar2(100)      ,
abe_cat2  varchar2(100)      ,
vend_bsa  Varchar2(30)     ,
abe_bsa  Varchar2(30)     ,
binding  varchar2(30)    ,
booktype varchar2(30)     ,
bookcondition varchar2(30)     ,
cat_providerid Number(5,0)    ,
cat_providerkey Varchar2(50)    ,
edition_num  number(10)      ,
edition_txt  varchar2(40)      ,
editor  varchar(255)    ,
illustrator varchar2(255)    ,
inscription varchar2(50)    ,
isbn_raw  Varchar2(25)             ,
isbn_vendor13 Number(13,0)     ,
isbn_matched13 Number(13,0)     ,
jacketcondition Varchar2(30)    ,
pictures Varchar2(255)    ,
printing  Varchar2(20)      ,
publisher_name  varchar2(750)      ,
publisher_year  Number(4)      ,
publisher_location  varchar2(750)     ,
item_size   Varchar2(50)     ,
subject  Varchar2(2000)     ,
synopsis Varchar2(255)    ,
author   varchar(750)      ,
title  varchar2(750)    ,
vend_cat varchar2(255)    ,
volume  Varchar2(50)    ,
weight  Varchar2(40)    ,
description varchar2(4000)
--, constraint listing_wide_pk primary key(listing_id)
)
-- TABLESPACE data_auto
NOLOGGING
PARTITION BY LIST (vendor_id) 
(  
PARTITION v2134736  values( 2134736 ),
PARTITION v8408184  values( 8408184 ),
PARTITION v999  values( 999 ),
PARTITION vendor_NULL  VALUES (NULL),
PARTITION vendor_small VALUES (DEFAULT)
)
NOCOMPRESS 
NOPARALLEL
ENABLE ROW MOVEMENT;


create index listing_wide_i_hs on listing_wide(vendor_listing_id, hash_code) local nologging;
create index listing_wide_i_id on listing_wide(listing_id) local nologging;

truncate table listing_wide;

insert /*+ append */ into listing_wide 
(listing_id, vendor_id, price, qty, hash_code, vendor_listing_id, author, title,description,isbn_raw, vend_bsa)
select
rownum listing_id, 
mod(rownum,8) vendor_id,
10.55 price,
30 qty,
rpad('x',31,'x') hash_code,
rownum vendor_listing_id,
rpad(rownum,400,'x') author,
rpad('x',400,'x') title,
rpad('x',300,'x') description,
rownum isbn_raw,
mod(rownum,100)||','||mod(Round(rownum/10),100) bsa
from dual
connect by level <= 100100;

commit;

select vendor_id, isbn_raw, hash_code, author
from listing_wide where rownum <= 3;

exec dbms_stats.gather_table_stats(user,'listing_wide');

drop table listing_file;

create table listing_file
PARTITION BY LIST (time_part_key)
SUBPARTITION BY LIST (match_col)
SUBPARTITION TEMPLATE 
(SUBPARTITION vendor_id_part  VALUES (1) ,
SUBPARTITION hash_code_part  VALUES (2) ,
SUBPARTITION listing_id_part VALUES (3) ,
SUBPARTITION other_part VALUES (DEFAULT)
)
(
PARTITION data_part_0 VALUES (0),
PARTITION data_part_1 VALUES (1),
PARTITION data_part_2 VALUES (2),
PARTITION data_part_3 VALUES (3),
PARTITION data_part_4 VALUES (4),
PARTITION data_part_5 VALUES (5),
--PARTITION dummy_null  VALUES (NULL),
PARTITION dummy_other VALUES (DEFAULT)
)
--tablespace data_auto
nologging
as
select a.* 
--, cast(null as timestamp(3)) update_date
, cast(null as number(2))    time_part_key
, 9 dest_queue
, 9 match_col
from listing_wide a
where 1=0;

alter table listing_file modify (
currency  varchar2(3) default 'USD' ,
listingtype  number(4,0) default 1001  ,
book_lang varchar2(3) default ' '  ,
create_date Date default sysdate  ,
update_date Date default sysdate 
);

alter table listing_file modify
(
update_date    default systimestamp,
time_part_key  default mod(floor(to_number(to_char(sysdate,'mi'))/10),10)
);
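
For instance, the time_part_key default buckets each row by the 10-minute interval of the load time, which lines up with the data_part_N list partitions above. A quick check for a hypothetical 12:37 load time:

select mod(floor(to_number(to_char(to_date('12:37','hh24:mi'),'mi'))/10),10) time_part_key
from dual;
-- minute 37 -> floor(37/10) = 3 -> time_part_key = 3, so the row goes to partition data_part_3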

truncate table listing_file;

insert /*+ append */ into listing_file
(listing_id, vendor_id, price, qty, hash_code, vendor_listing_id, author, title,description,isbn_raw, vend_bsa
, match_col)
select
rownum listing_id, 
mod(rownum,8) vendor_id,
10.55 price,
30 qty,
rpad('x',31,'x') hash_code,
rownum vendor_listing_id,
rpad(rownum,400,'x') author,
rpad('x',400,'x') title,
rpad('x',300,'x') description,
rownum isbn_raw,
mod(rownum,100)||','||mod(Round(rownum/10),100) bsa,
3 match_col
from dual
connect by level <= 2222;

commit;

exec dbms_stats.gather_table_stats(user,'listing_file');

Test
----

set linesize 120
set serveroutput on

Begin
  runstats_pkg.rs_start;

  -- Run 1: row-by-row update driven by a cursor loop
  for s in (select * from listing_file)
  loop
    update listing_wide d
       set d.price             = s.price,
           d.qty               = s.qty,
           d.hash_code         = s.hash_code,
           d.vendor_listing_id = s.vendor_listing_id,
           d.author            = s.author,
           d.title             = s.title,
           d.description       = s.description,
           d.isbn_raw          = s.isbn_raw,
           d.vend_bsa          = s.vend_bsa
     where s.vendor_id = d.vendor_id and s.listing_id = d.listing_id;
    --commit;  -- uncomment for the "commit inside the loop" run
  end loop;
  commit;

  runstats_pkg.rs_middle;

  -- Run 2: single set-based MERGE
  MERGE INTO listing_wide d
  using listing_file s
  on (s.vendor_id = d.vendor_id and s.listing_id = d.listing_id)
  when matched then
    update set
      d.price             = s.price,
      d.qty               = s.qty,
      d.hash_code         = s.hash_code,
      d.vendor_listing_id = s.vendor_listing_id,
      d.author            = s.author,
      d.title             = s.title,
      d.description       = s.description,
      d.isbn_raw          = s.isbn_raw,
      d.vend_bsa          = s.vend_bsa;
  commit;

  runstats_pkg.rs_stop(5);
End;
/

Reference
---------
http://tkyte.blogspot.com/2006/10/slow-by-slow.html

  • bulk it up with BULK COLLECT, then bulk-update the rows back into the database using the FORALL statement with UPDATE

http://www.oracle.com/technetwork/issue-archive/2011/11-may/o31asktom-354139.html

Tuesday, April 14, 2009

table stats tool script

Since I saw Ningoo show off his dandy tool tbsql, which made my mouth water, and he was reluctant to share his favorite tool, I had to create one myself.
The good news is that he has promised to open source it someday.

Purpose:
Display a table and its index statistics by specifying Owner and Table_Name.
It can help you investigate SQL query optimizer issues, in cooperation with the 10053 trace tool. And it can do more...
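
For example, in SQL*Plus (SCOTT and EMP here are just placeholder owner and table names; the report spools to table_info.log):

SQL> @table SCOTT EMP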

Reference:
http://www.ningoo.net/html/2009/how_to_get_extent_id_from_rowid.html

Here is the script:


/*

-- File name: table.sql
-- Goal: display table and index statistics by specifying Owner and Table_Name
--
-- Author: Yi (Charlie) Zhu
-- update: 29-Jan-2009
-- Blog: http://mujiang.blogspot.com/
--
-- Usage: @table [owner] [table_name]

29-Jan-2009: Charlie: Show popular value chart for height balance histogram


Reference
--
http://www.ningoo.net/html/2009/how_to_get_extent_id_from_rowid.html

*/

define owner='&1'
define table_name='&2'

set serveroutput off
set timing off
set verify off
set linesize 200
set define &

COLUMN SEGMENT_NAME FORMAT A40 HEADING 'Segment Name'
COLUMN SEGMENT_TYPE FORMAT A25 HEADING 'Segment Type'
COLUMN SEGMENT_SUBTYPE FORMAT A25 HEADING 'Segment SubType'
COLUMN Tablespace_Name FORMAT A30 HEADING 'Tablespace_Name'
COLUMN Partition_NAME FORMAT A25 HEADING 'Partition Name'
COLUMN subpartition_name FORMAT A22 HEADING 'SubPartition Name'
COLUMN INDEX_NAME FORMAT A30 HEADING 'INDEX Name'
COLUMN COLUMN_NAME FORMAT A30 HEADING 'COLUMN NAME'
COLUMN DATA_TYPE FORMAT A14 HEADING 'DATA TYPE'
COLUMN LOW_VALUE FORMAT A14 HEADING 'LOW_VALUE'
COLUMN HIGH_VALUE FORMAT A14 HEADING 'HIGH_VALUE'
COLUMN owner FORMAT A14
COLUMN table_name FORMAT A30
COLUMN HISTOGRAM FORMAT A9
COLUMN endpoint_actual_value format A30

COLUMN BLOCKS FORMAT 999,999,999 HEADING 'Blocks'
COLUMN MBytes FORMAT 999,999,999 HEADING 'MBytes'
COLUMN TOTAL_BYTES FORMAT 999,999 HEADING 'TotByte_MB'
COLUMN extents FORMAT 999,999
COLUMN LAST_ANALYZED FORMAT A11 HEADING 'LAST|ANALYZED'
COLUMN pop_chart FORMAT A11 HEADING 'Pop Chart'

COLUMN unf FORMAT 999,999.99 HEADING 'unf_K'
COLUMN fs1 FORMAT 999,999.99 HEADING 'fs1_K'
COLUMN fs2 FORMAT 999,999.99 HEADING 'fs2_K'
COLUMN fs3 FORMAT 999,999.99 HEADING 'fs3_K'
COLUMN fs4 FORMAT 999,999.99 HEADING 'fs4_K'
COLUMN full FORMAT 999,999.99 HEADING 'full_K'
COLUMN total_blocks FORMAT 999,999.99 HEADING 'total_blocks_K'
COLUMN unused_blocks FORMAT 999,999.99 HEADING 'unused_blocks_K'
COLUMN unfb FORMAT 999,999 HEADING 'unfb_M'
COLUMN fs1b FORMAT 999,999 HEADING 'fs1b_M'
COLUMN fs2b FORMAT 999,999 HEADING 'fs2b_M'
COLUMN fs3b FORMAT 999,999 HEADING 'fs3b_M'
COLUMN fs4b FORMAT 999,999 HEADING 'fs4b_M'
COLUMN fullb FORMAT 999,999 HEADING 'fullb_M'
COLUMN total_bytes FORMAT 999,999 HEADING 'total_bytes_M'
COLUMN unused_bytes FORMAT 999,999 HEADING 'unused_bytes_M'
COLUMN last_extf FORMAT 999,999 HEADING 'last_extf_id'
COLUMN last_extb FORMAT 9999999999 HEADING 'last_extb_id'
COLUMN lastusedblock FORMAT 9999999999 HEADING 'lastusedblock'

col num_rows new_value l_num_rows;
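-- (NEW_VALUE captures the NUM_ROWS value from the "table info" query below into &l_num_rows,
--  which the height-balanced histogram query later uses to estimate cardinality)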


spool table_info.log

prompt table info

SELECT owner,table_name,pct_free,num_rows,blocks,chain_cnt,
avg_row_len,to_char(last_analyzed,'yyyy-mon-dd') last_analyzed,buffer_pool
FROM dba_tables
WHERE owner = (Upper('&&owner'))
and table_name IN (Upper('&&table_name'))
ORDER BY blocks desc;

prompt table stats

SELECT partition_name,subpartition_name,num_rows, blocks, empty_blocks, avg_space, chain_cnt, avg_row_len
FROM dba_tab_statistics
WHERE owner = (Upper('&&owner'))
and table_name IN (Upper('&&table_name'))
order by table_name,partition_position;

-- IOT come with Primary Key index name
select owner||'.'||segment_name segment_name,partition_name,round(bytes/1024/1024) mbytes,
blocks,extents,segment_type,tablespace_name
from dba_segments
WHERE owner = (Upper('&&owner'))
and segment_name IN (Upper('&&table_name'))
;

prompt column data type

SELECT --OWNER,TABLE_NAME,COLUMN_ID,
COLUMN_NAME,DATA_TYPE,DATA_LENGTH,DATA_PRECISION
FROM dba_tab_cols
WHERE owner = (Upper('&&owner'))
and TABLE_NAME IN (Upper('&&table_name'))
order by owner,table_name,column_name
;

prompt column stats

SELECT
column_name AS "NAME",
num_distinct AS "#DST",
low_value,
high_value,
density AS "DENS",
num_nulls AS "#NULL",
avg_col_len AS "AVGLEN",
histogram,
num_buckets AS "#BKT"
FROM dba_tab_col_statistics
WHERE owner = (Upper('&&owner'))
and TABLE_NAME IN (Upper('&&table_name'))
order by column_name
;

prompt column frequency histogram

SELECT column_name, endpoint_value, endpoint_actual_value, endpoint_number,
endpoint_number - lag(endpoint_number,1,0) OVER (partition by column_name ORDER BY endpoint_number) AS frequency
FROM dba_tab_histograms
WHERE owner = (Upper('&&owner'))
and table_name = (Upper('&&table_name'))
and column_name in
(select column_name FROM dba_tab_col_statistics
where owner = (Upper('&&owner'))
and TABLE_NAME IN (Upper('&&table_name'))
and histogram = 'FREQUENCY'
)
ORDER BY column_name, endpoint_number;

prompt column height-balanced histogram

SELECT column_name, endpoint_value, endpoint_number
, Max(endpoint_number) OVER (partition by column_name) Max_EPN
, endpoint_number - lag(endpoint_number,1,0) OVER (partition by column_name ORDER BY endpoint_number) AS popular_count
, &l_num_rows * (endpoint_number - lag(endpoint_number,1,0) OVER (partition by column_name ORDER BY endpoint_number))
/(Max(endpoint_number) OVER (partition by column_name)) AS Est_Card
, rpad('.',endpoint_number - lag(endpoint_number,1,0) OVER (partition by column_name ORDER BY endpoint_number) - 1,'x') pop_chart
FROM dba_tab_histograms
WHERE owner = (Upper('&&owner'))
and table_name = (Upper('&&table_name'))
and column_name in
(select column_name FROM dba_tab_col_statistics
where owner = (Upper('&&owner'))
and TABLE_NAME IN (Upper('&&table_name'))
and histogram = 'HEIGHT BALANCED'
)
ORDER BY column_name, endpoint_number;

prompt Index name,physical size,including columns and selectivity

SELECT m.INDEX_NAME,COLUMN_NAME,
COLUMN_POSITION,DISTINCT_KEYS,LEAF_BLOCKS,NUM_ROWS, COLUMN_LENGTH
FROM DBA_INDEXES m,dba_ind_columns d
WHERE m.owner = (Upper('&&owner'))
AND m.INDEX_NAME = d.INDEX_NAME
AND m.OWNER = d.TABLE_OWNER
AND m.TABLE_NAME = d.TABLE_NAME
AND m.TABLE_NAME IN (Upper('&&table_name'))
ORDER BY index_name,column_position;

prompt index stats

SELECT
index_name,
blevel,
leaf_blocks AS leaf_blks,
distinct_keys AS dst_keys,
num_rows,
clustering_factor AS clust_fact,
avg_leaf_blocks_per_key AS leaf_per_key,
avg_data_blocks_per_key AS data_per_key
FROM dba_ind_statistics
WHERE owner = (Upper('&&owner'))
AND TABLE_OWNER = (Upper('&&owner'))
AND TABLE_NAME IN (Upper('&&table_name'));

VARIABLE total_blocks NUMBER
VARIABLE total_bytes NUMBER
VARIABLE unused_blocks NUMBER
VARIABLE unused_bytes NUMBER
VARIABLE last_extf NUMBER
VARIABLE last_extb NUMBER
VARIABLE lastusedblock NUMBER
begin
DBMS_SPACE.UNUSED_SPACE(Upper('&&owner'), Upper('&&table_name'), 'TABLE',:total_blocks,
:total_bytes,:unused_blocks,:unused_bytes,:last_extf,
:last_extb,:lastusedblock);
end;
/

select
:total_bytes /1024/1024 total_bytes ,
:unused_bytes /1024/1024 unused_bytes ,
:total_blocks /1024 total_blocks ,
:unused_blocks/1024 unused_blocks,
:lastusedblock/1024 lastusedblock ,
:last_extf last_extf ,
:last_extb last_extb
from dual;

variable unf number;
variable unfb number;
variable fs1 number;
variable fs1b number;
variable fs2 number;
variable fs2b number;
variable fs3 number;
variable fs3b number;
variable fs4 number;
variable fs4b number;
variable full number;
variable fullb number;

-- shows the space usage of data blocks under the segment High Water Mark

begin
dbms_space.space_usage(Upper('&&owner'),Upper('&&table_name'),
'TABLE',
:unf,:unfb,
:fs1,:fs1b,
:fs2,:fs2b,
:fs3,:fs3b,
:fs4,:fs4b,
:full,:fullb);
end;
/

select
:unf /1024 unf
,:fs1 /1024 fs1
,:fs2 /1024 fs2
,:fs3 /1024 fs3
,:fs4 /1024 fs4
,:full /1024 full
,:unfb /1024/1024 unfb
,:fs1b /1024/1024 fs1b
,:fs2b /1024/1024 fs2b
,:fs3b /1024/1024 fs3b
,:fs4b /1024/1024 fs4b
,:fullb /1024/1024 fullb
from dual;

spool off

/*
-- unf: unformatted_blocks

print unf ;
print unfb ;
print fs4 ;
print fs4b;
print fs3 ;
print fs3b;
print fs2 ;
print fs2b;
print fs1 ;
print fs1b;
print full;
print fullb;


-- Returns the list of segments that are associated with the object
select * from Table(
DBMS_SPACE.OBJECT_DEPENDENT_SEGMENTS(
objowner => Upper('&&owner'),
objname => Upper('&&table_name'),
partname => Null,
objtype => 1)
);

SQL> print

TOTAL_BLOCKS TOTAL_BYTES UNUSED_BLOCKS UNUSED_BYTES LASTEXTF LAST_EXTB LASTUSEDBLOCK
------------ ----------- ------------- ------------ ---------- ---------- -------------
1536 12582912 0 0 5 1545 128


UNF UNFB
---------- ----------
0 0

FreeSpace1: 0 to 25% free space
FreeSpace2: 25 to 50% free space
...

FS1 FS2 FS3 FS4 FULL
---------- ---------- ---------- ---------- ----------
386 25 16 945 132


FS1B FS2B FS3B FS4B FULLB
---------- ---------- ---------- ---------- ----------
3162112 204800 131072 7741440 1081344

*/

Wednesday, April 08, 2009

debug.f to instrument PL/SQL code

Purpose
-------
Code instrumentation is the fine art of making every other line of code a debug statement.
Here we show you how to install and use debug.f in your PL/SQL code.

Download
--------

debugf.zip contains the code; extract and save all files to a local folder.
http://asktom.oracle.com/~tkyte/debugf -- this URL has expired;
use this one to download debugf: https://drive.google.com/file/d/0B9XqiVdd05wXaEhDMTNYX1E5dWM/edit?usp=sharing


Discussion
----------
"Instrumentation overhead"
http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:10445025326812

"The 10 (Oracle Development) Commandments"
http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:51960184066540

Setup
-----
We suggest you:

create a utility user:
create user utility identified by utility;
grant create session, create procedure, create table, create trigger to utility;
alter user utility default tablespace users quota unlimited on users;

connect sys as sysdba
grant execute on utl_file to utility;
grant create public synonym, create any DIRECTORY to utility;

run install_debug in that schema,
create a public synonym for debug,
and then you can use it.


connect utility/utility
@install_debug.sql
--@A:\plsql\debugf\install_debug.sql

create public synonym debug for utility.debug;
grant execute on utility.debug to public;

DROP DIRECTORY debug_dir;
CREATE or replace DIRECTORY debug_dir AS '/data1/debug';
CREATE or replace DIRECTORY debug_dir AS '/u01/debug';

CREATE or replace DIRECTORY debug_dir AS 'c:/temp/log';

--GRANT READ, WRITE ON DIRECTORY debug_dir TO PUBLIC; 
--exec debug.clear;
--exec debug.init(p_file=>'DEBUG_DIR/system.dbg');

-- Enable debug trace for current login user
declare
  ls_user varchar2(30) DEFAULT USER;
begin
 debug.clear;
 debug.init(p_file=>'DEBUG_DIR/'||ls_user||'.dbg');
 debug.status;
end;
/


-- Set module name to trace, p_modules=>''
-- Enable trace for a user, p_user=>'SYSTEM'
declare
 ls_trace_user varchar2(30) := 'CS_USER';
BEGIN
 debug.clear(p_user=>ls_trace_user);
 debug.init(p_modules=>'ALL', p_user=>ls_trace_user,
   --p_date_format=>'YYYY/MM/DD HH24:MI:SS',
   p_show_sesid=>'YES',
   p_file=>'DEBUG_DIR/'||ls_trace_user||'.dbg');
 debug.status(p_user=>ls_trace_user);
end;
/

-- batch setup logging users
declare
 ls_trace_user ct_type;
BEGIN

 ls_trace_user := ct_type ('SYS','SYSTEM','ABEDBA','ABELISTING');

 FOR i IN ls_trace_user.FIRST .. ls_trace_user.LAST
 LOOP
  debug.clear(p_user=>ls_trace_user(i));
  debug.init(p_modules=>'ALL', p_user=>ls_trace_user(i),
    --p_date_format=>'YYYY/MM/DD HH24:MI:SS',
    p_show_sesid=>'NO',
    p_file=>'DEBUG_DIR/'||ls_trace_user(i)||'.dbg');
  debug.status(p_user=>ls_trace_user(i));
 End Loop;
end;
/
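
Note: ct_type above is a user-defined collection type that install_debug does not create. A minimal sketch of a suitable definition (the name and element length are assumed from the usage above):

create or replace type ct_type as table of varchar2(30);
/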

Demo 1
------

Begin
  debug.f('start %s : %s', 1, sysdate);
  debug.f(SubStr(TO_CHAR(SQLCODE)||': '||SQLERRM,1,100));
Exception
    WHEN OTHERS THEN
      debug.f(SubStr(TO_CHAR(SQLCODE)||': '||SQLERRM,1,100));
End;
/

Demo 2
------

@connect "/ as sysdba"
grant execute on dbms_lock to ops$tkyte;
@connect /

set echo on
/*
first we'll start by enabling debug - this would be done once, sets up a 
table with values...

After that, we'll watch the debug....
*/
exec debug.init
exec debug.f( 'hello world' );
pause
set define off
host xterm -e tail -f /tmp/OPS\$TKYTE.dbg &
set define on

create or replace procedure want_to_trace
as
begin
    for i in 1 .. 10
    loop
        if ( mod(i,2) = 0 )
        then
            debug.f( 'processing step %s of 10 - %s', i , 'even');
            dbms_lock.sleep(1);
        else
            debug.f( 'processing step %s of 10', i );
            dbms_lock.sleep(1);
        end if;
    end loop;
end;
/

exec want_to_trace

monitor
-------

To trace it:
tail -f [DEBUG_DIR]/[LOGIN USER].dbg

For example:
tail -f /data1/debug/SYSTEM.dbg

Tuesday, April 07, 2009

New efficient HASH FULL OUTER join in 11g

Purpose:
--
The Oracle 11g query optimizer introduced a new HASH JOIN FULL OUTER execution method.

Starting with Oracle Database 11g Release 1 (11.1), Oracle Database automatically uses a native execution method based on a hash join for executing full outer joins whenever possible. When the new method is used to execute a full outer join, the execution plan for the query contains HASH JOIN FULL OUTER.

To instruct the optimizer not to consider using the hash full outer join execution method, apply the NO_NATIVE_FULL_OUTER_JOIN hint. The NO_NATIVE_FULL_OUTER_JOIN hint instructs the optimizer to exclude the native execution method when joining each specified table. Instead, the full outer join is executed as a union of left outer join and an anti-join.
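
As a quick illustration (a sketch only, reusing the t1/t2 test tables created in the Benchmark section below), the hint goes into the query block that contains the full outer join:

select /*+ NO_NATIVE_FULL_OUTER_JOIN */ t1.id, t2.id
from t1 full outer join t2
on t1.id = t2.id;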

Result
--
With the new HASH JOIN FULL OUTER execution method, the logical read cost (the Buffers column in the execution plan output) equals the sum of the two base table full scans (92 + 92 = 184 buffers), nothing more.
The old full outer join costs about 4 times as much logical I/O on the table scans (732 buffers).


Benchmark
---------

drop table t1 purge;
drop table t2 purge;

create table t1
as
select
rownum + 5 id,
rpad('x',500,'x') c1
from dual
connect by level <= 5000;

create table t2
as
select
rownum + 15 id,
rpad('x',500,'x') c1
from dual
connect by level <= 5000;

set serveroutput off
set linesize 170
set pagesize 0

select max(t1c1), max(t2c1)
from (
select /*+ gather_plan_statistics */ t1.id, t2.id, t1.c1 t1c1, t2.c1 t2c1
from t1 full outer join t2
on t1.id = t2.id
);

-- 10g way
SELECT * FROM table(dbms_xplan.display_cursor(NULL,NULL, 'runstats_last'));

-- 11g way
SELECT * FROM table(dbms_xplan.display_cursor(NULL,NULL, 'iostats memstats last partition'));

---------------------------------------------------------------------------------------------
| Id  | Operation                  | Name | A-Rows | Buffers | Reads | Writes | A-Time      |
---------------------------------------------------------------------------------------------
|   1 |  SORT AGGREGATE            |      |      1 |     732 |   101 |      0 | 00:00:00.15 |
|   2 |   VIEW                     |      |   5010 |     732 |   101 |      0 | 00:00:00.06 |
|   3 |    UNION-ALL               |      |   5010 |     732 |   101 |      0 | 00:00:00.05 |
|*  4 |     HASH JOIN RIGHT OUTER  |      |   5000 |     366 |   101 |      0 | 00:00:00.04 |
|   5 |      TABLE ACCESS FULL     | T2   |   5000 |     183 |    56 |      0 | 00:00:00.01 |
|   6 |      TABLE ACCESS FULL     | T1   |   5000 |     183 |    45 |      0 | 00:00:00.01 |
|*  7 |     HASH JOIN RIGHT ANTI   |      |     10 |     366 |     0 |      0 | 00:00:00.01 |
|   8 |      TABLE ACCESS FULL     | T1   |   5000 |     183 |     0 |      0 | 00:00:00.01 |
|   9 |      TABLE ACCESS FULL     | T2   |   5000 |     183 |     0 |      0 | 00:00:00.01 |
---------------------------------------------------------------------------------------------

----------------------------------------------------------------------------------------------------------
| Id  | Operation                | Name     | A-Rows | A-Time      | Buffers |  OMem |  1Mem | Used-Mem  |
----------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT         |          |      1 | 00:00:00.09 |     184 |       |       |           |
|   1 |  SORT AGGREGATE          |          |      1 | 00:00:00.09 |     184 |       |       |           |
|   2 |   VIEW                   | VW_FOJ_0 |   5010 | 00:00:00.03 |     184 |       |       |           |
|*  3 |    HASH JOIN FULL OUTER  |          |   5010 | 00:00:00.03 |     184 | 3239K | 1058K | 3539K (0) |
|   4 |     TABLE ACCESS FULL    | T2       |   5000 | 00:00:00.01 |      92 |       |       |           |
|   5 |     TABLE ACCESS FULL    | T1       |   5000 | 00:00:00.01 |      92 |       |       |           |
----------------------------------------------------------------------------------------------------------

Reference
--
Oracle® Database Performance Tuning Guide 11g Release 1 (11.1)
- 11 The Query Optimizer, 11.6.7 Outer Joins
OracleQueryOptimizer11g_20081202.pdf by Christian Antognini

Wednesday, April 01, 2009

PL/SQL instrument utility u$err package

Purpose
-------
1. PL/SQL utility to Instrument and Trace.
2. Err_Trap.

debug.f() writes to a trace file on the database server.
u$err.trc() writes to the utility.error_log table, using an autonomous transaction.

u$err.err() writes the error stack to the utility.error_log table, and also sends it to std_io via DBMS_OUTPUT.

File: file://A:/PLSQL/debugf/u$err_pkg.sql

Setup
-----

drop table utility.error_log;
create table utility.error_log
(err_code varchar2(10), err_module varchar2(30), err_action varchar2(30),
create_date date default SYSDATE, create_user varchar2(30) default USER,
err_text varchar2(4000)
)
partition by range (create_date) INTERVAL(NUMTOYMINTERVAL(1, 'MONTH'))
(
PARTITION p0 VALUES LESS THAN (TO_DATE('1-1-2009', 'DD-MM-YYYY'))
)
;

grant select on utility.error_log to qa_admin;


Code
----

CREATE OR REPLACE PACKAGE utility.u$err AS

/***
DEBUG.F : log trace information
***/

PROCEDURE trc (p_msg IN VARCHAR2);

PROCEDURE err (p_msg IN VARCHAR2 default '');

END;
/

CREATE OR REPLACE PACKAGE BODY utility.u$err AS

/***
Goal: 1. Trace, 2. Err_Trap

Who| Date | What
Charlie(Yi 木匠) | 12-June-2008 | combine FORMAT_ERROR_BACKTRACE and DBMS_UTILITY.FORMAT_ERROR_STACK into one string

DEBUG.F

set serveroutput on size 200000

alter package utility.u$err compile;
grant execute on utility.u$err to public;
create or replace public synonym u$err for utility.u$err;

File: A:\PLSQL\debugf\u$err_pkg.sql

***/

PROCEDURE trc (p_msg IN VARCHAR2)
IS
  PRAGMA AUTONOMOUS_TRANSACTION;
  ls_module varchar2(32767);
  ls_action varchar2(32767);
  ls_code   varchar2(10);
BEGIN
  DBMS_APPLICATION_INFO.READ_MODULE (
    module_name => ls_module,
    action_name => ls_action);
  ls_code := to_char(sqlcode);

  INSERT INTO error_log (err_code, err_module, err_action, err_text)
  VALUES (ls_code, substr(ls_module,1,30), substr(ls_action,1,30), p_msg);
  COMMIT;
  DBMS_OUTPUT.PUT_LINE('trc:'||ls_code||':'||ls_action||':'||ls_module||':'||p_msg);
EXCEPTION
  WHEN OTHERS THEN
    ROLLBACK;
    DBMS_OUTPUT.PUT_LINE('Error in log_err: '||SUBSTR(SQLERRM,1,120));
END;

PROCEDURE err (p_msg IN VARCHAR2)
IS
  ls_err varchar2(32767);
BEGIN
  ls_err := SubStr(p_msg||'.'||DBMS_UTILITY.FORMAT_ERROR_BACKTRACE||'<---'||
            chr(10)||DBMS_UTILITY.FORMAT_ERROR_STACK, 1, 2000);
  debug.f(ls_err);
  u$err.trc (ls_err);
  DBMS_APPLICATION_INFO.SET_MODULE(Null, Null);
EXCEPTION
  WHEN OTHERS THEN
    u$err.trc (SubStr(DBMS_UTILITY.FORMAT_ERROR_STACK,1,500));
    DBMS_OUTPUT.PUT_LINE('Error stack in log_err: '||SUBSTR(SQLERRM,1,120));
END;

END;
/

alter package utility.u$err compile;
grant execute on utility.u$err to public;
create or replace public synonym u$err for utility.u$err;



Test u$err.err and u$err.trc
----
exec u$err.err;

declare
  li pls_integer;

  procedure p_in
  is
  begin
    li := 5 / 0;
  exception
    when others then
      u$err.err;
      raise;
  end;

begin
  u$err.trc('start');
  p_in;
  u$err.trc('end');
exception
  when others then
    u$err.err('outer error');
    u$err.trc('before raise');
    raise;
    u$err.trc('after raise');
end;
/

-- Query data
select * from
(
select * from utility.error_log
-- PARTITION FOR(to_date('11-mar-2009','dd-mon-yyyy'))
order by create_date desc
) where rownum < 10;


Housekeeping
----
--Purge old log
delete utility.error_log where create_date < sysdate - 100;
commit;

ALTER TABLE utility.error_log DROP PARTITION FOR(to_date('11-mar-2009','dd-mon-yyyy'));



Reference
---------
Oracle PL/SQL Best Practices - Steven Feuerstein
http://www.oreilly.com/catalog/9780596514105