** Use at your own risk, not OraPub's **

5-September-2014
Craig Shallahamer, craig@orapub.com

This document contains the scripts, notes, and comments related to my experiments
regarding the relationship between parallelism and elapsed time.

What I'm demonstrating is:

   Elapsed Time = DB Time = DB CPU + non-idle Wait Time
   Wall Time    = Elapsed Time / Effective Parallelism

The experiment was run on a single-node Oracle 12.1 instance with ASM. Here are some
parameter settings:

instance_name        string      ORCL
db_unique_name       string      ORCL
db_name              string      ORCL
db_block_buffers     integer     0
db_cache_size        big integer 0
cpu_count            integer     2
memory_max_target    big integer 1504M
memory_target        big integer 1504M
user_dump_dest       string      /oracle/diag/rdbms/orcl/ORCL/t
spfile               string      +DATA/ORCL/spfileorcl.ora

-- You want a really small SGA, perhaps 500M, to force physical IO.
show sga

-- Make sure PQ is set up. You want to see parallel_max_servers > 1.
show parameter parallel

create or replace function rss_get_interval_s_fnc(i_intrvl interval day to second)
return number is
begin
   return extract(day    from i_intrvl) * 86400 +
          extract(hour   from i_intrvl) * 3600  +
          extract(minute from i_intrvl) * 60    +
          extract(second from i_intrvl);
exception
   when others then
      return null;
end;
/

analyze table big1 estimate statistics;
analyze table big2 estimate statistics;

Table big2 is in USERS, which is spread across 3 physical devices.
Table big1 is in CRAIGTEST.

set timing on
set autotrace on

alter system flush buffer_cache;
select /*+ FULL(big2) NOPARALLEL(big2) */ count(*) from big2 where rownum < 9000000;

alter system flush buffer_cache;
select /*+ FULL(big2) PARALLEL(big2,3) */ count(*) from big2 where rownum < 9000000;

-- Connect as the DBA user.
connect osmmon/osmmon

drop table op_results;
create table op_results (
   sample_no   number,
   wall_s      number,
   el_vsql_s   number,
   cpu_vsql_s  number,
   wait_time_s number,
   db_cpu_s    number,
   db_time_s   number
);

-- You need to get the sql_id for the below chunk of code. It's kind of a chicken/egg
-- situation. What I did was run the below code, used the OSM rss.sql tool to grab the
-- sql_id, then set the sql_id a few lines below, then ran the code below and verified
-- the sql_id again using rss.sql.

@rss 16 16 0 9999 % % 1
   -or-
@rss 0 9999 0 9999 % % 1

Once rss.sql is running, I ran the below snippet to get the SQL_IDs:

select sid from v$mystat where rownum = 1;

declare
   i_var number;
begin
   --select /*+ FULL(big2) NOPARALLEL(big2) */ count(*) into i_var from big2 where rownum < 9000000;
   select /*+ FULL(big2) PARALLEL(big2,3) */ count(*) into i_var from big2 where rownum < 9000000;
end;
/

The results on my system...

def sqlid='f2pf23bb4vtph'   -- noparallel
def sqlid='f80ps3cpmz2uu'   -- parallel
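If you don't have rss.sql handy, a quick alternative (my own sketch, not part of the original
workflow) is to look the hinted statements up in v$sql by their text right after running them:

-- Minimal sketch: assumes the hinted statements are still in the shared pool.
-- The NOT LIKE predicate keeps this lookup query from matching itself.
select sql_id, child_number, executions, sql_text
  from v$sql
 where sql_text like '%FULL(big2)%count(*)%'
   and sql_text not like '%v$sql%';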
Here is the snippet I used to actually grab the data. It's not pretty but it works.

def total_samples=2
def chill=4

commit;
truncate table op_results;
exec dbms_lock.sleep(&chill);

set tab off
set serveroutput on

begin
declare
   theline          varchar2(500);
   sample           number;
   time_t0          timestamp;
   time_t1          timestamp;
   wall_s           number;
   mysid_v          number;
   vsql_cputime_t0  number;
   vsql_cputime_t1  number;
   cpu_vsql_s       number;
   vsql_eltime_t0   number;
   vsql_eltime_t1   number;
   el_vsql_s        number;
   wait_time_t0     number;
   wait_time_t1     number;
   wait_time_s      number;
   db_cpu_t0        number;
   db_cpu_t1        number;
   db_cpu_s         number;
   db_time_t0       number;
   db_time_t1       number;
   db_time_s        number;
   i_var            number;
begin
   select sid into mysid_v from v$mystat where rownum = 1;

   execute immediate 'alter system flush buffer_cache';

   for sample in 1..&total_samples loop

      select sum(time_waited) into wait_time_t0
        from v$session_event
       where wait_class != 'Idle'
         and event not like 'SQL*Net%'
         and sid = mysid_v;

      select value into db_cpu_t0  from v$sess_time_model where stat_name = 'DB CPU'  and sid = mysid_v;
      select value into db_time_t0 from v$sess_time_model where stat_name = 'DB time' and sid = mysid_v;

      select cpu_time     into vsql_cputime_t0 from v$sql where sql_id = '&sqlid';
      select elapsed_time into vsql_eltime_t0  from v$sql where sql_id = '&sqlid';

      select current_timestamp into time_t0 from dual;

      -- select count(*) into i_var from dual;

      -- f2pf23bb4vtph
      -- select /*+ FULL(big2) NOPARALLEL(big2) */ count(*) into i_var from big2 where rownum < 9000000;

      -- f80ps3cpmz2uu
      select /*+ FULL(big2) PARALLEL(big2,3) */ count(*) into i_var from big2 where rownum < 9000000;

      -- Starting another SQL is a good way to trigger any stats to be flushed.
      select count(*) into i_var from dual;

      select sum(time_waited) into wait_time_t1
        from v$session_event
       where wait_class != 'Idle'
         and event not like 'SQL*Net%'
         and sid = mysid_v;

      select value into db_cpu_t1  from v$sess_time_model where stat_name = 'DB CPU'  and sid = mysid_v;
      select value into db_time_t1 from v$sess_time_model where stat_name = 'DB time' and sid = mysid_v;

      select cpu_time     into vsql_cputime_t1 from v$sql where sql_id = '&sqlid';
      select elapsed_time into vsql_eltime_t1  from v$sql where sql_id = '&sqlid';

      select current_timestamp into time_t1 from dual;

      select (wait_time_t1    - wait_time_t0)/100        into wait_time_s from dual;
      select (db_cpu_t1       - db_cpu_t0)/1000000       into db_cpu_s    from dual;
      select (db_time_t1      - db_time_t0)/1000000      into db_time_s   from dual;
      select (vsql_cputime_t1 - vsql_cputime_t0)/1000000 into cpu_vsql_s  from dual;
      select (vsql_eltime_t1  - vsql_eltime_t0)/1000000  into el_vsql_s   from dual;

      select rss_get_interval_s_fnc(time_t1 - time_t0) into wall_s from dual;

      theline := 'sample='||sample||' wall_s='||wall_s||' el_vsql_s='||el_vsql_s||
                 ' cpu_vsql_s='||cpu_vsql_s||' wait_time_s='||wait_time_s||
                 ' db_cpu_s='||db_cpu_s||' db_time_s='||db_time_s;
      dbms_output.put_line(theline);

      insert into op_results values
         (sample, wall_s, el_vsql_s, cpu_vsql_s, wait_time_s, db_cpu_s, db_time_s);
      commit;  -- So we can see progress more quickly.

      execute immediate 'alter system flush buffer_cache';
      dbms_lock.sleep(&chill);

   end loop;

   commit;
end;
end;
/
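Before trusting the parallel numbers, it is worth confirming that the PARALLEL(big2,3) hint
actually got PX servers. This check is my own addition, not part of the original run; run it
from the same session immediately after the parallel query:

-- Sanity-check sketch: "Queries Parallelized" and "Server Threads" should be
-- non-zero in LAST_QUERY if the hint was honored.
select statistic, last_query, session_total
  from v$pq_sesstat;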
Here are the actual results.

select * from op_results;

NO Parallel Output
**************************************************
main session is sid 16

SQL> select * from op_results;

 SAMPLE_NO     WALL_S  EL_VSQL_S CPU_VSQL_S WAIT_TIME_S   DB_CPU_S  DB_TIME_S
---------- ---------- ---------- ---------- ----------- ---------- ----------
         1  35.480252  35.470015   9.764407       24.97   9.428506  34.152294
         2  35.670021  35.659748   9.778554       25.15   9.774984  35.541861
         3  35.749926  35.739473   9.774375       25.12    9.31266  34.126285
         4  35.868076  35.857752   9.772321       25.32   9.345398  34.273479
         5  36.193062   36.18378   9.712962       25.46   9.548465  35.499693

A few lines of rss.sql output during the no-parallel run; only the main session (sid 16)
is active, running sql_id f2pf23bb4vtph:

310 15:31:21:552  16  827 OSMMON WAIT f2pf23bb4vtph ior direct path read [9,12160,128]
311 15:31:22:551  16  827 OSMMON CPU  f2pf23bb4vtph -   -                [,,]
312 15:31:23:554  16  827 OSMMON CPU  f2pf23bb4vtph -   -                [,,]
313 15:31:24:553  16  827 OSMMON WAIT f2pf23bb4vtph ior direpath read    [9,17540,124]
314 15:31:25:553  16  827 OSMMON WAIT f2pf23bb4vtph ior direct path read [9,19712,128]

Parallel Output
**************************************************
main session is sid 16

SQL> select * from op_results;

 SAMPLE_NO     WALL_S  EL_VSQL_S CPU_VSQL_S WAIT_TIME_S   DB_CPU_S  DB_TIME_S
---------- ---------- ---------- ---------- ----------- ---------- ----------
         1  46.305951 132.174453   19.53818         .01   4.069579   4.664083
         2  46.982111 132.797536  19.371063         .02   3.809439   4.959602
         3   47.79761 134.338069  19.739735         .02   4.170921   4.555491
         4   45.97324 131.809249  19.397557         .01   3.790226   4.159572
         5  46.053922 131.765983  19.754143         .01   4.062703   4.461175

A few lines of rss.sql output during the parallel run; several sessions are now running
sql_id f80ps3cpmz2uu concurrently:

100 15:27:51:617 196  355 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,42368,128]
100 15:27:51:617  36  151 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,25600,128]
100 15:27:51:617  26  829 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [8,62976,128]
100 15:27:51:617 180   21 -      WAIT g6tq73c02ujn5 iow direct path write temp      [201,34177,3]
101 15:27:52:617 180   21 -      WAIT g6tq73c02ujn5 ior db file sequential read     [4,272,1]
101 15:27:52:617 196  355 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,43520,128]
101 15:27:52:617  36  151 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,27392,128]
101 15:27:52:617  26  829 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [8,64640,128]
101 15:27:52:617 168    1 -      WAIT -             iow log file parallel write     [2,4,2]
102 15:27:53:617 180   21 -      CPU  7r24h5ucyjggz -   -                           [,,]
102 15:27:53:617 196  355 OSMMON CPU  f80ps3cpmz2uu -   -                           [,,]
102 15:27:53:617  36  151 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,28928,128]
102 15:27:53:617  26  829 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [8,81024,128]
103 15:27:54:617   6    1 -      WAIT -             ior control file sequential read [0,18,1]
103 15:27:54:617 180   21 -      WAIT g89yk8ub53cyx ior db file sequential read     [4,3449,1]
103 15:27:54:617 196  355 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,81664,128]
103 15:27:54:617  36  151 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [9,30328,8]
103 15:27:54:617  26  829 OSMMON WAIT f80ps3cpmz2uu ior direct path read            [8,82560,128]
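To tie the raw samples back to the two formulas at the top, a quick summary over op_results
can be run after either test. This is my own sketch, not part of the original write-up:
avg_cpu_plus_wait_s should track avg_el_s (Elapsed Time = DB CPU + non-idle Wait Time), and
eff_parallelism is simply el_vsql_s / wall_s (Wall Time = Elapsed Time / Effective Parallelism).

-- Minimal sanity-check sketch over the collected samples.
select avg(wall_s)                     avg_wall_s,
       avg(el_vsql_s)                  avg_el_s,
       avg(cpu_vsql_s + wait_time_s)   avg_cpu_plus_wait_s,
       avg(el_vsql_s) / avg(wall_s)    eff_parallelism
  from op_results;

On the numbers above, the no-parallel run gives roughly 9.8 + 25.2 = 35 seconds of CPU plus
wait against about 35.8 seconds of SQL elapsed time, with an effective parallelism near 1.
The parallel run gives an effective parallelism of about 132.6 / 46.6 = 2.8, close to the
requested degree of 3.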