Lacking the "do less work" mindset
Global temporary table characteristics
One day, redo generation on a production system suddenly exploded, undo usage ballooned, and I/O pressure climbed. Diagnosis traced it to a program module deployed the previous night: a trivial statement along the lines of delete from t_mid had been executed several hundred thousand times within a short period. After confirming with the owners, the program was suspended. A look at the code showed that t_mid is merely a staging table: intermediate results of a calculation are parked there and can be discarded once the calculation finishes.

For a requirement like this, do we really need to keep issuing DELETEs? Deleting is expensive, consumes a large amount of undo and generates a large amount of redo. Can the delete be avoided altogether? Consider the global temporary table examples below.

-- Build a session-level global temporary table (its rows are cleared automatically when the session ends)
drop table ljb_tmp_session;
create global temporary table ljb_tmp_session on commit preserve rows as select * from dba_objects where 1=2;
select table_name,temporary,duration from user_tables where table_name='LJB_TMP_SESSION';

-- Build a transaction-level global temporary table (its rows are cleared automatically on commit, without waiting for the session to end)
drop table ljb_tmp_transaction;
create global temporary table ljb_tmp_transaction on commit delete rows as select * from dba_objects where 1=2;
select table_name, temporary, duration from user_tables where table_name='LJB_TMP_TRANSACTION';

insert all
  into ljb_tmp_transaction
  into ljb_tmp_session
select * from dba_objects;

select session_cnt,transaction_cnt
  from (select count(*) session_cnt from ljb_tmp_session),
       (select count(*) transaction_cnt from ljb_tmp_transaction);

commit;

select session_cnt,transaction_cnt
  from (select count(*) session_cnt from ljb_tmp_session),
       (select count(*) transaction_cnt from ljb_tmp_transaction);

disconnect
connect ljb/ljb

select session_cnt,transaction_cnt
  from (select count(*) session_cnt from ljb_tmp_session),
       (select count(*) transaction_cnt from ljb_tmp_transaction);

-- Note that a global temporary table cannot be dropped while it is still in use:
SQL> drop table ljb_tmp_session;
drop table ljb_tmp_session
ORA-14452: attempt to create, alter or drop an index on temporary table already in use
SQL>
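To tie this back to the t_mid incident: below is a minimal sketch of how the staging step could lean on a transaction-level global temporary table instead of repeated DELETEs. The name t_mid_gtt is hypothetical, and dba_objects merely stands in for the real intermediate calculation.

-- Hypothetical replacement for the t_mid staging table (illustrative names only)
create global temporary table t_mid_gtt
  on commit delete rows
  as select * from dba_objects where 1=2;

insert into t_mid_gtt select * from dba_objects;   -- stage the intermediate result
select count(*) from t_mid_gtt;                    -- consume it within the same transaction
commit;                                            -- the commit empties the table: no DELETE, far less undo and redo
select count(*) from t_mid_gtt;                    -- returns 0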
Partition cleanup

-- Range partitioning example
drop table range_part_tab purge;
-- Note: this is a range-partitioned table
create table range_part_tab
(
  id        number,
  deal_date date,
  area_code number,
  contents  varchar2(4000)
)
partition by range (deal_date)
(
  partition p1  values less than (TO_DATE('2012-02-01', 'YYYY-MM-DD')),
  partition p2  values less than (TO_DATE('2012-03-01', 'YYYY-MM-DD')),
  partition p3  values less than (TO_DATE('2012-04-01', 'YYYY-MM-DD')),
  partition p4  values less than (TO_DATE('2012-05-01', 'YYYY-MM-DD')),
  partition p5  values less than (TO_DATE('2012-06-01', 'YYYY-MM-DD')),
  partition p6  values less than (TO_DATE('2012-07-01', 'YYYY-MM-DD')),
  partition p7  values less than (TO_DATE('2012-08-01', 'YYYY-MM-DD')),
  partition p8  values less than (TO_DATE('2012-09-01', 'YYYY-MM-DD')),
  partition p9  values less than (TO_DATE('2012-10-01', 'YYYY-MM-DD')),
  partition p10 values less than (TO_DATE('2012-11-01', 'YYYY-MM-DD')),
  partition p11 values less than (TO_DATE('2012-12-01', 'YYYY-MM-DD')),
  partition p12 values less than (TO_DATE('2013-01-01', 'YYYY-MM-DD')),
  partition p_max values less than (maxvalue)
);
-- Insert 100,000 rows with random dates across the whole of 2012 and random area codes (591 to 599, standing for Fujian area codes):
insert into range_part_tab (id,deal_date,area_code,contents)
select rownum,
       to_date( to_char(sysdate-700,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
       ceil(dbms_random.value(590,599)),
       rpad('*',400,'*')
  from dual
connect by rownum <= 100000;
commit;

-- Partitioning principles: the same load into an ordinary (non-partitioned) table
drop table norm_tab purge;
create table norm_tab
(
  id        number,
  deal_date date,
  area_code number,
  contents  varchar2(4000)
);
insert into norm_tab(id,deal_date,area_code,contents)
select rownum,
       to_date( to_char(sysdate-700,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
       ceil(dbms_random.value(590,599)),
       rpad('*',400,'*')
  from dual
connect by rownum <= 100000;
commit;

-- How convenient partition cleanup is: on the ordinary table the month has to be deleted
delete from norm_tab
 where deal_date >= TO_DATE('2012-09-01', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-09-30', 'YYYY-MM-DD');
-- For the convenience of later experiments, roll the delete back here.
rollback;

-- On the partitioned table, the same month can simply be truncated
select * from range_part_tab partition(p9);
alter table range_part_tab truncate partition p9;

set linesize 1000
set autotrace on
select count(*) from norm_tab
 where deal_date >= TO_DATE('2012-09-01', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-09-30', 'YYYY-MM-DD');
select count(*) from range_part_tab
 where deal_date >= TO_DATE('2012-09-01', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-09-30', 'YYYY-MM-DD');

-- The striking partition exchange example
drop table mid_table purge;
create table mid_table
(
  id        number,
  deal_date date,
  area_code number,
  contents  varchar2(4000)
);
select count(*) from range_part_tab partition(p8);
-- Of course, besides naming the partition with partition(p8), the partition boundary conditions can be used instead:
select count(*) from range_part_tab
 where deal_date >= TO_DATE('2012-08-01', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-08-31', 'YYYY-MM-DD');
-- The following command is the classic partition exchange:
alter table range_part_tab exchange partition p8 with table mid_table;
-- Querying again shows that the data in partition p8 is gone.
select count(*) from range_part_tab partition(p8);
-- And the ordinary table has gone from 0 rows to 8628 rows, so the exchange indeed took place.
select count(*) from mid_table;
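One practical caveat not covered by the script above, offered here as a hedged side note: if range_part_tab carried a global index, partition-level DDL such as TRUNCATE or DROP PARTITION would normally leave that index UNUSABLE. A minimal sketch of the commonly used clause, assuming such an index existed (idx_rpt_area is hypothetical):

-- Hypothetical global index; the demo table above has none.
create index idx_rpt_area on range_part_tab(area_code);

-- Keep the global index usable while cleaning up a partition.
alter table range_part_tab truncate partition p10 update global indexes;
alter table range_part_tab drop partition p11 update global indexes;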
Partition pruning

-- Range partitioning example
drop table range_part_tab purge;
-- Note: this is a range-partitioned table
create table range_part_tab
(
  id        number,
  deal_date date,
  area_code number,
  contents  varchar2(4000)
)
partition by range (deal_date)
(
  partition p1  values less than (TO_DATE('2012-02-01', 'YYYY-MM-DD')),
  partition p2  values less than (TO_DATE('2012-03-01', 'YYYY-MM-DD')),
  partition p3  values less than (TO_DATE('2012-04-01', 'YYYY-MM-DD')),
  partition p4  values less than (TO_DATE('2012-05-01', 'YYYY-MM-DD')),
  partition p5  values less than (TO_DATE('2012-06-01', 'YYYY-MM-DD')),
  partition p6  values less than (TO_DATE('2012-07-01', 'YYYY-MM-DD')),
  partition p7  values less than (TO_DATE('2012-08-01', 'YYYY-MM-DD')),
  partition p8  values less than (TO_DATE('2012-09-01', 'YYYY-MM-DD')),
  partition p9  values less than (TO_DATE('2012-10-01', 'YYYY-MM-DD')),
  partition p10 values less than (TO_DATE('2012-11-01', 'YYYY-MM-DD')),
  partition p11 values less than (TO_DATE('2012-12-01', 'YYYY-MM-DD')),
  partition p12 values less than (TO_DATE('2013-01-01', 'YYYY-MM-DD')),
  partition p_max values less than (maxvalue)
);
-- Insert 100,000 rows with random dates across the whole of 2012 and random area codes (591 to 599, standing for Fujian area codes):
insert into range_part_tab (id,deal_date,area_code,contents)
select rownum,
       to_date( to_char(sysdate-365,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
       ceil(dbms_random.value(590,599)),
       rpad('*',400,'*')
  from dual
connect by rownum <= 100000;
commit;

-- Partitioning principles: the same load into an ordinary (non-partitioned) table
drop table norm_tab purge;
create table norm_tab
(
  id        number,
  deal_date date,
  area_code number,
  contents  varchar2(4000)
);
insert into norm_tab(id,deal_date,area_code,contents)
select rownum,
       to_date( to_char(sysdate-365,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
       ceil(dbms_random.value(590,599)),
       rpad('*',400,'*')
  from dual
connect by rownum <= 100000;
commit;

-- Observe the performance advantage partition pruning gives the range-partitioned table
set linesize 1000
set autotrace traceonly
set timing on
select *
  from range_part_tab
 where deal_date >= TO_DATE('2012-09-04', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-09-07', 'YYYY-MM-DD');

-- The same statement against the ordinary table, which cannot use the DEAL_DATE condition for partition pruning
select *
  from norm_tab
 where deal_date >= TO_DATE('2012-09-04', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-09-07', 'YYYY-MM-DD');

-- Partitioning principles: the difference in segment allocation between the ordinary table and the partitioned table
SET LINESIZE 666
set pagesize 5000
column segment_name format a20
column partition_name format a20
column segment_type format a20
select segment_name,partition_name,segment_type,bytes / 1024 / 1024 "字節數(M)",tablespace_name
  from user_segments
 where segment_name IN ('RANGE_PART_TAB','NORM_TAB');
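As a hedged supplement to the autotrace comparison above: partition pruning can also be confirmed directly from the cursor plan, where the Pstart/Pstop columns show which partitions were actually touched. A minimal sketch; the gather_plan_statistics hint and the 'ALLSTATS LAST +PARTITION' format string are assumptions about DBMS_XPLAN options, so verify them on your version.

set autotrace off
select /*+ gather_plan_statistics */ count(*)
  from range_part_tab
 where deal_date >= TO_DATE('2012-09-04', 'YYYY-MM-DD')
   and deal_date <= TO_DATE('2012-09-07', 'YYYY-MM-DD');

-- Pstart/Pstop in the plan show the partitions actually scanned (a single partition is expected here).
select * from table(dbms_xplan.display_cursor(null, null, 'ALLSTATS LAST +PARTITION'));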
Lacking the "do less work" mindset (doing less in design)

Reducing function calls in SQL
A more detailed investigation:

drop table t1 purge;
drop table t2 purge;
create table t1 as select * from dba_objects;
create table t2 as select * from dba_objects;
update t2 set object_id=rownum;
commit;

-- f_deal1 does its work with a plain PL/SQL assignment
create or replace function f_deal1(p_name in varchar2) return varchar2 deterministic
is
  v_name varchar2(200);
begin
  -- select substr(upper(p_name),1,4) into v_name from dual;
  v_name := substr(upper(p_name),1,4);
  return v_name;
end;
/
-- f_deal2 does the same work through a SELECT ... FROM dual
create or replace function f_deal2(p_name in varchar2) return varchar2 deterministic
is
  v_name varchar2(200);
begin
  select substr(upper(p_name),1,4) into v_name from dual;
  -- v_name := substr(upper(p_name),1,4);
  return v_name;
end;
/
set autotrace traceonly statistics
set linesize 1000
select * from t1 where f_deal1(object_name)='FILE';
select * from t1 where f_deal2(object_name)='FILE';
CREATE INDEX IDX_OBJECT_NAME ON T1(f_deal2(object_name));
select * from t1 where f_deal2(object_name)='FILE';
select f_deal1(object_name) from t1;
select f_deal2(object_name) from t1;
select f_deal2(t1.object_name)
  from t1, t2
 where t1.object_id = t2.object_id
   and t2.object_type LIKE '%PART%';
select *
  from t2, (select f_deal2(t1.object_name), object_id from t1) t
 where t2.object_id = t.object_id
   and t2.object_type LIKE '%PART%';
select name from (select rownum rn, f_deal2(t1.object_name) name from t1) where rn>=10 and rn<=12;
select name from (select rownum rn, f_deal2(t1.object_name) name from t1 where rownum<=12) where rn>=10;
select name from (select rownum rn, f_deal2(t1.object_name) name from t1) where rn<=12;
select f_deal2(t1.object_name) name from t1 where rownum<=12;
select f_deal2(t1.object_name) name from t1 where object_id=9999999999999;
select * from t1 where f_deal2(t1.object_name)='AAAA';
select * from t1 where f_deal1(t1.object_name)='AAAA';
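A hedged addendum to the comparison above: when the wrapped logic is as simple as substr(upper(...)), the cheapest option is often to call no user-defined function at all, so the SQL engine never has to switch into PL/SQL for each row. A minimal sketch on the same t1 table (the index name idx_t1_name_prefix is illustrative):

-- The same filter expressed as a plain SQL expression; no per-row PL/SQL context switch.
select * from t1 where substr(upper(object_name), 1, 4) = 'FILE';

-- If this predicate is frequent, a function-based index on the bare expression can serve it directly.
create index idx_t1_name_prefix on t1(substr(upper(object_name), 1, 4));
select * from t1 where substr(upper(object_name), 1, 4) = 'FILE';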
Now let's look at case studies of "doing less" that come up in development.

Set-based writing
-- Row-by-row writing: inserting 100,000 rows one at a time
drop table t purge;
create table t ( x int );

create or replace procedure proc_insert as
begin
  for i in 1 .. 100000
  loop
    insert into t values (i);
  end loop;
  commit;
end;
/
set timing on
begin
  for i in 1 .. 100000
  loop
    insert into t values (i);
  end loop;
  commit;
end;
/
-- Rewritten as a set-based statement:
drop table t purge;
create table t ( x int );
insert into t select rownum from dual connect by level<=100000;
commit;
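Where the loop cannot be removed entirely (for example, the values really are produced row by row in PL/SQL), bulk binding is a middle ground between the loop above and the pure set-based INSERT. A minimal sketch, assuming the same table t:

declare
  type t_num_tab is table of t.x%type index by pls_integer;
  l_vals t_num_tab;
begin
  -- Build the values in memory (stands in for whatever per-row logic produced them).
  for i in 1 .. 100000 loop
    l_vals(i) := i;
  end loop;
  -- One bulk-bound INSERT instead of 100,000 single-row executions.
  forall i in 1 .. l_vals.count
    insert into t values (l_vals(i));
  commit;
end;
/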
A study of views

drop table t1 cascade constraints purge;
drop table t2 cascade constraints purge;
create table t1 as select * from dba_objects;
create table t2 as select * from dba_objects where rownum<=10000;
update t1 set object_id=rownum;
update t2 set object_id=rownum;
commit;
create or replace view v_t1_join_t2 as
select t2.object_id, t2.object_name, t1.object_type, t1.owner
  from t1, t2
 where t1.object_id = t2.object_id;
set autotrace traceonly
set linesize 1000
select * from v_t1_join_t2;
select object_id,object_name from v_t1_join_t2;
-- Now an experiment: add a primary key on t1 and a foreign key on t2
alter table T1 add constraint pk_object_id primary key (OBJECT_ID);
alter table T2 add constraint fk_objecdt_id foreign key (OBJECT_ID) references t1 (OBJECT_ID);
select * from v_t1_join_t2;
select object_id,object_name from v_t1_join_t2;
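The point of the experiment is that, once the primary/foreign key pair is in place, the query that selects only t2 columns lets the optimizer drop the join to t1 altogether (join elimination). A hedged sketch of how to confirm that from the plan rather than from autotrace statistics; the expected output is stated as an expectation, not a guarantee on every version:

set autotrace off
explain plan for select object_id, object_name from v_t1_join_t2;
select * from table(dbms_xplan.display);

-- Expected (hedged): a single access to T2 and no operation against T1 in the plan.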
Avoiding the trip back to the table

drop table t purge;
create table t as select * from dba_objects;
create index idx_object_id on t(object_id,object_type);
set linesize 1000
set autotrace traceonly
select object_id,object_type from t where object_id=28;
/
select * from t where object_id=28;
/
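A hedged note on what the two autotrace runs above should show: the first query asks only for columns stored in idx_object_id, so the plan can stop at the INDEX RANGE SCAN, while select * has to add a TABLE ACCESS BY INDEX ROWID step. A small sketch of extending the idea when one more column is needed; widening the index this way is an illustrative choice, not part of the original demo:

-- Make the index cover OWNER as well, so a three-column query still avoids the table.
drop index idx_object_id;
create index idx_object_id_cover on t(object_id, object_type, owner);
select object_id, object_type, owner from t where object_id = 28;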
The impact of superfluous columns in table joins

-- Environment setup: preparation before studying join access behaviour
DROP TABLE t1 CASCADE CONSTRAINTS PURGE;
DROP TABLE t2 CASCADE CONSTRAINTS PURGE;
CREATE TABLE t1 (id NUMBER NOT NULL, n NUMBER, contents VARCHAR2(4000));
CREATE TABLE t2 (id NUMBER NOT NULL, t1_id NUMBER NOT NULL, n NUMBER, contents VARCHAR2(4000));
execute dbms_random.seed(0);
INSERT INTO t1
SELECT rownum, rownum, dbms_random.string('a', 50)
  FROM dual
CONNECT BY level <= 100
 ORDER BY dbms_random.random;
INSERT INTO t2
SELECT rownum, rownum, rownum, dbms_random.string('b', 50)
  FROM dual
CONNECT BY level <= 100000
 ORDER BY dbms_random.random;
COMMIT;
select count(*) from t1;
select count(*) from t2;

-- Merge Sort Join selecting all columns
alter session set statistics_level=all;
set linesize 1000
SELECT /*+ leading(t2) use_merge(t1)*/ * FROM t1, t2 WHERE t1.id = t2.t1_id;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));

-- Merge Sort Join selecting only part of the columns
SELECT /*+ leading(t2) use_merge(t1)*/ t1.id FROM t1, t2 WHERE t1.id = t2.t1_id;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));

Rewriting with CASE WHEN
一.基本信息介紹dcc_sys_log和dcc_ne_log兩表皆無主鍵 統計信息都已收集 analyze table dcc_sys_log compute statistics for table for all indexes for all indexed columns; analyze table dcc_ne_log compute statistics for table for all indexes for all indexed columns; 兩表的PEER_ID都為非空列 alter table DCC_SYS_LOG modify PEER_ID not null; alter table DCC_NE_LOG modify peer_id not null; 表索引情況如下: create index IDX_DCC_SYS_LOG_PEER on DCC_SYS_LOG (PEER_ID); create index IDX_DCC_NE_LOG_peer on DCC_NE_LOG (PEER_ID); create index IDX_DCC_NE_LOG_time on DCC_NE_LOG (log_time);SQL語句最終返回記錄數特點:該復雜SQL返回記錄不超過100條: 一般而言,peer_id為監控主機的標識,一般不超過100個,所以最終復雜SQL的查詢結果一般不超過100個,如本案例監控的主機是41個, 最終復雜SQL的查詢值就是為41條記錄!數據及記錄分布情況 select count(*),count(peer_id),count(distinct(peer_id)),count(casewhen log_time between trunc(sysdate) and sysdate then1elsenullend) as current_day,count(log_time),count(distinct(log_time)) from dcc_ne_log;COUNT(*) COUNT(PEER_ID) COUNT(DISTINCT(PEER_ID)) CURRENT_DAY COUNT(LOG_TIME) COUNT(DISTINCT(LOG_TIME)) ---------- -------------- ------------------------ ----------- --------------- -------------------------87154 87154 7 9016 87154 380select count(*),count(peer_id),count(distinct(peer_id)),count(log_time),count(distinct(log_time)) from dcc_sys_log;COUNT(*) COUNT(PEER_ID) COUNT(DISTINCT(PEER_ID)) COUNT(LOG_TIME) COUNT(DISTINCT(LOG_TIME)) ---------- -------------- ------------------------ --------------- -------------------------4899834 4899834 41 4899834 27943二.逐步提升性能的如下4次改造原始老腳本及老執行計劃select distinct ne_state.peer_id peer_name,to_char(ne_state.ne_state) peer_state,(casewhen ne_state.ne_state = 0 thento_char(0)else(select distinct to_char(nvl(ne_active.active, 0))from dcc_sys_log,(select peer_id,decode(action,'active',1,'de-active',0,0) active,max(log_time)from dcc_sys_logwhere action = 'active'or action = 'de-active'group by (peer_id, action)) ne_activewhere dcc_sys_log.peer_id = ne_active.peer_id(+)and dcc_sys_log.peer_id = ne_state.peer_id)end) peer_active,(casewhen ne_state.ne_state = 0 thento_char(0)else(to_char(nvl((select count(*)from dcc_ne_logwhere dcc_ne_log.result <> 1and peer_id = ne_state.peer_idand log_time betweentrunc(sysdate) and sysdategroup by (peer_id)),0)))end) err_cnt,(casewhen ne_state.ne_state = 0 thento_char(0)else(to_char(nvl((select count(*)from dcc_ne_log in_dnlwhere in_dnl.direction = 'recv'and in_dnl.peer_id =ne_state.peer_idand log_time betweentrunc(sysdate) and sysdate),0)))end) recv_cnt,(casewhen ne_state.ne_state = 0 thento_char(0)else(to_char(nvl((select sum(length)from dcc_ne_log in_dnlwhere in_dnl.direction = 'recv'and in_dnl.peer_id =ne_state.peer_idand log_time betweentrunc(sysdate) and sysdate),0)))end) recv_byte,(casewhen ne_state.ne_state = 0 thento_char(0)else(to_char(nvl((select count(*)from dcc_ne_log in_dnlwhere in_dnl.direction = 'send'and in_dnl.peer_id =ne_state.peer_idand log_time betweentrunc(sysdate) and sysdate),0)))end) send_cnt,(casewhen ne_state.ne_state = 0 thento_char(0)else(to_char(nvl((select sum(length)from dcc_ne_log in_dnlwhere in_dnl.direction = 'send'and in_dnl.peer_id =ne_state.peer_idand log_time betweentrunc(sysdate) and sysdate),0)))end) send_bytefrom dcc_ne_log,(select distinct dsl1.peer_id peer_id,nvl(ne_disconnect_info.ne_state, 1) ne_statefrom dcc_sys_log dsl1,(select distinct dnl.peer_id peer_id,decode(action,'disconnect',0,'connect',0,1) ne_statefrom dcc_sys_log dsl, dcc_ne_log dnlwhere dsl.peer_id = dnl.peer_idand ((dsl.action = 'disconnect' anddsl.cause = '關閉對端') or(dsl.action = 'connect' anddsl.cause = '連接主機失敗'))and dsl.log_time =(select max(log_time)from 
dcc_sys_logwhere peer_id = dnl.peer_idand log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+)) ne_statewhere ne_state.peer_id = dcc_ne_log.peer_id(+)執行計劃SELECT STATEMENT, GOAL = ALL_ROWS 120155 7 483 HASH UNIQUE 6001 119421 6448734 MERGE JOIN OUTER 4414 119421 6448734 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 2866104 SORT JOIN 3818 2 60 VIEW IDEPTEST 3817 2 60 SORT GROUP BY 3817 2 84 TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_SYS_LOG 3816 30 1260 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 SORT GROUP BY NOSORT 302 1 31 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 1 31 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 33 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 540 17820 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 37 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 540 19980 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 33 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 503 16599 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 37 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 503 18611 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 HASH UNIQUE 120155 7 483 HASH JOIN RIGHT OUTER 52583 908514324 62687488356 VIEW IDEPTEST 28574 10 240 HASH UNIQUE 28574 10 1160 HASH JOIN 28573 1561 181076 HASH JOIN 28486 3874 368030 VIEW SYS VW_SQ_1 14365 41 1353 HASH GROUP BY 14365 41 2624 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14092 4895849 313334336 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14107 2462339 152665018 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 HASH JOIN RIGHT OUTER 18969 908514324 40883144580 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 5347 4896255 117510120 ----第1次改造---- 動作:將NE_STATE單獨成WITH子句,并用ROWNUM>=0來告知ORACLE中間記錄數的情況不過我覺的有點奇怪,以前用ROWNUM是為了讓視圖不要拆開與其他表關聯,做為一個整體,和今天的效果不一樣,今天感覺是起到改變表驅動順序的作用。 效果:改進了表驅動的順序,WITH部分的視圖結果集小,在前面驅動才是正確的,原先沒有ROWNUM時變成在后面驅動。具體可以通過UE的比較工具來比較老腳本的執行計劃可清晰看出,改變驅動順序后速度從3000秒提升到50秒。 第1次改造后的SQL語句如下: with ne_state as ( select distinct dsl1.peer_id peer_id, nvl(ne_disconnect_info.ne_state, 1) ne_state from dcc_sys_log dsl1, (select distinct dnl.peer_id peer_id, decode(action, 'disconnect', 0, 'connect', 0, 1) ne_state from dcc_sys_log dsl, dcc_ne_log dnl where dsl.peer_id = dnl.peer_id and ((dsl.action = 'disconnect' and dsl.cause = '關閉對端') or (dsl.action = 'connect' and dsl.cause = '連接主機失敗')) and dsl.log_time = (select max(log_time) from dcc_sys_log where peer_id = dnl.peer_id and log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+) and rownum>=0) select distinct ne_state.peer_id peer_name, to_char(ne_state.ne_state) peer_state, (case when ne_state.ne_state = 0 then to_char(0) else (select distinct to_char(nvl(ne_active.active, 0)) from dcc_sys_log, (select peer_id, decode(action, 'active', 1, 'de-active', 0, 0) active, max(log_time) from dcc_sys_log where action = 'active' or action = 'de-active' group by (peer_id, action)) ne_active where dcc_sys_log.peer_id = ne_active.peer_id(+) and dcc_sys_log.peer_id = ne_state.peer_id) end) peer_active, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log where dcc_ne_log.result <> 1 and peer_id = ne_state.peer_id and log_time between trunc(sysdate) and sysdate group by (peer_id)), 0))) end) err_cnt, (case when ne_state.ne_state = 0 then 
to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log in_dnl where in_dnl.direction = 'recv' and in_dnl.peer_id = ne_state.peer_id and log_time between trunc(sysdate) and sysdate), 0))) end) recv_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select sum(length) from dcc_ne_log in_dnl where in_dnl.direction = 'recv' and in_dnl.peer_id = ne_state.peer_id and log_time between trunc(sysdate) and sysdate), 0))) end) recv_byte, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log in_dnl where in_dnl.direction = 'send' and in_dnl.peer_id = ne_state.peer_id and log_time between trunc(sysdate) and sysdate), 0))) end) send_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select sum(length) from dcc_ne_log in_dnl where in_dnl.direction = 'send' and in_dnl.peer_id = ne_state.peer_id and log_time between trunc(sysdate) and sysdate), 0))) end) send_byte from dcc_ne_log,ne_state where ne_state.peer_id = dcc_ne_log.peer_id(+); 第1次改造后的SQL執行計劃如下: SELECT STATEMENT, GOAL = ALL_ROWS 34310 7 336 HASH UNIQUE 6001 119421 6448734 MERGE JOIN OUTER 4414 119421 6448734 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 2866104 SORT JOIN 3818 2 60 VIEW IDEPTEST 3817 2 60 SORT GROUP BY 3817 2 84 TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_SYS_LOG 3816 30 1260 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 SORT GROUP BY NOSORT 302 1 31 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 1 31 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 33 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 540 17820 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 37 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 540 19980 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 33 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 503 16599 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 SORT AGGREGATE 1 37 FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 503 18611 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 HASH UNIQUE 34310 7 336 HASH JOIN OUTER 34309 1299 62352 VIEW IDEPTEST 34222 7 189 HASH UNIQUE 34222 7 336 COUNT FILTER HASH JOIN RIGHT OUTER 33949 4896255 235020240 VIEW IDEPTEST 28574 10 240 HASH UNIQUE 28574 10 1160 HASH JOIN 28573 1561 181076 HASH JOIN 28486 3874 368030 VIEW SYS VW_SQ_1 14365 41 1353 HASH GROUP BY 14365 41 2624 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14092 4895849 313334336 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14107 2462339 152665018 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 5347 4896255 117510120 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 ---第2次改造----動作:構造dcc_ne_log_time的WITH子句. 效果:多次調用這個后,系統內部優化,產生自帶臨時表SYS_TEMP_0FD9D661A_2F9A0F1,多次調用SYS_TEMP_0FD9D661A_2F9A0F1而非調用dcc_ne_log是有差別的,調用SYS_TEMP_0FD9D661A_2F9A0F1極大的提升了性能,從50秒縮短為14秒! 
第2次改造后SQL語句如下: with ne_state as (select distinct dsl1.peer_id peer_id, nvl(ne_disconnect_info.ne_state, 1) ne_state from dcc_sys_log dsl1, (select distinct dnl.peer_id peer_id, decode(action, 'disconnect', 0, 'connect', 0, 1) ne_state from dcc_sys_log dsl, dcc_ne_log dnl where dsl.peer_id = dnl.peer_id and ((dsl.action = 'disconnect' and dsl.cause = '關閉對端') or (dsl.action = 'connect' and dsl.cause = '連接主機失敗')) and dsl.log_time = (select max(log_time) from dcc_sys_log where peer_id = dnl.peer_id and log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+) and rownum>=0), dcc_ne_log_time as (select * from dcc_ne_log where log_time between trunc(sysdate) and sysdate ) select distinct ne_state.peer_id peer_name, to_char(ne_state.ne_state) peer_state, (case when ne_state.ne_state = 0 then to_char(0) else (select distinct to_char(nvl(ne_active.active, 0)) from dcc_sys_log, (select peer_id, decode(action, 'active', 1, 'de-active', 0, 0) active, max(log_time) from dcc_sys_log where action = 'active' or action = 'de-active' group by (peer_id, action)) ne_active where dcc_sys_log.peer_id = ne_active.peer_id(+) and dcc_sys_log.peer_id = ne_state.peer_id) end) peer_active, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log_time where dcc_ne_log_time.result <> 1 and peer_id = ne_state.peer_id group by (peer_id)), 0))) end) err_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log_time in_dnl where in_dnl.direction = 'recv' and in_dnl.peer_id = ne_state.peer_id), 0))) end) recv_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select sum(length) from dcc_ne_log_time in_dnl where in_dnl.direction = 'recv' and in_dnl.peer_id = ne_state.peer_id), 0))) end) recv_byte, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log_time in_dnl where in_dnl.direction = 'send' and in_dnl.peer_id = ne_state.peer_id), 0))) end) send_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select sum(length) from dcc_ne_log_time in_dnl where in_dnl.direction = 'send' and in_dnl.peer_id = ne_state.peer_id), 0))) end) send_byte from dcc_ne_log_time,ne_state where ne_state.peer_id = dcc_ne_log_time.peer_id(+); 第2次改造后的SQL執行計劃如下: SELECT STATEMENT, GOAL = ALL_ROWS 34584 7 336 HASH UNIQUE 6001 119421 6448734 MERGE JOIN OUTER 4414 119421 6448734 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 2866104 SORT JOIN 3818 2 60 VIEW IDEPTEST 3817 2 60 SORT GROUP BY 3817 2 84 TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_SYS_LOG 3816 30 1260 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 SORT GROUP BY NOSORT 58 7 238 VIEW IDEPTEST 58 7302 248268 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D661A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 53 VIEW IDEPTEST 58 7302 387006 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D661A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 57 VIEW IDEPTEST 58 7302 416214 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D661A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 53 VIEW IDEPTEST 58 7302 387006 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D661A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 57 VIEW IDEPTEST 58 7302 416214 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D661A_2F9A0F1 58 7302 1891218 TEMP TABLE TRANSFORMATION LOAD AS SELECT COUNT FILTER FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 7302 1891218 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 HASH UNIQUE 34282 7 336 HASH JOIN OUTER 34281 7 336 VIEW IDEPTEST 34222 7 189 
HASH UNIQUE 34222 7 336 COUNT FILTER HASH JOIN RIGHT OUTER 33949 4896255 235020240 VIEW IDEPTEST 28574 10 240 HASH UNIQUE 28574 10 1160 HASH JOIN 28573 1561 181076 HASH JOIN 28486 3874 368030 VIEW SYS VW_SQ_1 14365 41 1353 HASH GROUP BY 14365 41 2624 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14092 4895849 313334336 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14107 2462339 152665018 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 5347 4896255 117510120 VIEW IDEPTEST 58 7302 153342 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D661A_2F9A0F1 58 7302 1891218 ----第3次改進-----動作:部分SQL進行等價改寫 將 select distinct to_char(nvl(ne_active.active, 0)) from dcc_sys_log, (select peer_id, decode(action, 'active', 1, 'de-active', 0, 0) active, max(log_time) from dcc_sys_log where action = 'active' or action = 'de-active' group by (peer_id, action)) ne_active where dcc_sys_log.peer_id = ne_active.peer_id(+) and dcc_sys_log.peer_id = ne_state.peer_id 修改為: NVL((select '1' from dcc_sys_log where peer_id = ne_state.peer_id and action = 'active' and rownum=1),'0') 效果:改寫的寫法dcc_sys_log表的掃描1次數,而未改寫時是掃描dcc_sys_log表2次性能提升,執行速度由15秒變為10秒 第3次改造后的SQL語句如下: with ne_state as (select distinct dsl1.peer_id peer_id, nvl(ne_disconnect_info.ne_state, 1) ne_state from dcc_sys_log dsl1, (select distinct dnl.peer_id peer_id, decode(action, 'disconnect', 0, 'connect', 0, 1) ne_state from dcc_sys_log dsl, dcc_ne_log dnl where dsl.peer_id = dnl.peer_id and ((dsl.action = 'disconnect' and dsl.cause = '關閉對端') or (dsl.action = 'connect' and dsl.cause = '連接主機失敗')) and dsl.log_time = (select max(log_time) from dcc_sys_log where peer_id = dnl.peer_id and log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+) and rownum>=0), dcc_ne_log_time as (select * from dcc_ne_log where log_time between trunc(sysdate) and sysdate ) select distinct ne_state.peer_id peer_name, to_char(ne_state.ne_state) peer_state, (case when ne_state.ne_state = 0 then to_char(0) else NVL((select '1' from dcc_sys_log where peer_id = ne_state.peer_id and action = 'active' and rownum=1),'0') end) peer_active, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log_time where dcc_ne_log_time.result <> 1 and peer_id = ne_state.peer_id group by (peer_id)), 0))) end) err_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log_time in_dnl where in_dnl.direction = 'recv' and in_dnl.peer_id = ne_state.peer_id), 0))) end) recv_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select sum(length) from dcc_ne_log_time in_dnl where in_dnl.direction = 'recv' and in_dnl.peer_id = ne_state.peer_id), 0))) end) recv_byte, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select count(*) from dcc_ne_log_time in_dnl where in_dnl.direction = 'send' and in_dnl.peer_id = ne_state.peer_id), 0))) end) send_cnt, (case when ne_state.ne_state = 0 then to_char(0) else (to_char(nvl((select sum(length) from dcc_ne_log_time in_dnl where in_dnl.direction = 'send' and in_dnl.peer_id = ne_state.peer_id), 0))) end) send_byte from dcc_ne_log_time,ne_state where ne_state.peer_id = dcc_ne_log_time.peer_id(+); 第3次改造后的SQL執行計劃如下: SELECT STATEMENT, GOAL = ALL_ROWS 34584 7 336 COUNT STOPKEY TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_SYS_LOG 3816 1 35 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 SORT GROUP BY NOSORT 58 7 238 VIEW IDEPTEST 58 7302 248268 TABLE ACCESS FULL SYS 
SYS_TEMP_0FD9D662A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 44 VIEW IDEPTEST 58 7302 321288 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D662A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 57 VIEW IDEPTEST 58 7302 416214 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D662A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 44 VIEW IDEPTEST 58 7302 321288 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D662A_2F9A0F1 58 7302 1891218 SORT AGGREGATE 1 57 VIEW IDEPTEST 58 7302 416214 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D662A_2F9A0F1 58 7302 1891218 TEMP TABLE TRANSFORMATION LOAD AS SELECT FILTER TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 302 7302 1891218 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 21 7302 HASH UNIQUE 34282 7 336 HASH JOIN OUTER 34281 7 336 VIEW IDEPTEST 34222 7 189 HASH UNIQUE 34222 7 336 COUNT FILTER HASH JOIN RIGHT OUTER 33949 4896255 235020240 VIEW IDEPTEST 28574 10 240 HASH UNIQUE 28574 10 1160 HASH JOIN 28573 1561 181076 HASH JOIN 28486 3874 368030 VIEW SYS VW_SQ_1 14365 41 1353 HASH GROUP BY 14365 41 2624 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14092 4895849 313334336 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14107 2462339 152665018 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 5347 4896255 117510120 VIEW IDEPTEST 58 7302 153342 TABLE ACCESS FULL SYS SYS_TEMP_0FD9D662A_2F9A0F1 58 7302 1891218 ---第4次改造---- 動作:將所有標量子查詢改造為單獨一個表關聯寫法 效果:將標量子查詢的多次掃描,降低為將表掃描次僅1次,執行時間從10秒縮短為7秒with ne_state as (select distinct dsl1.peer_id peer_id, nvl(ne_disconnect_info.ne_state, 1) ne_state from dcc_sys_log dsl1, (select distinct dnl.peer_id peer_id, decode(action, 'disconnect', 0, 'connect', 0, 1) ne_state from dcc_sys_log dsl, dcc_ne_log dnl where dsl.peer_id = dnl.peer_id and ((dsl.action = 'disconnect' and dsl.cause = '關閉對端') or (dsl.action = 'connect' and dsl.cause = '連接主機失敗')) and dsl.log_time = (select max(log_time) from dcc_sys_log where peer_id = dnl.peer_id and log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+) and rownum>=0), dcc_ne_log_time as (select peer_id,COUNT(CASE WHEN RESULT <> 1 THEN 1 END) err_cnt,COUNT(CASE WHEN direction = 'recv' THEN 1 END) recv_cnt,SUM(CASE WHEN direction = 'recv' THEN length END) recv_byte,COUNT(CASE WHEN direction = 'send' THEN 1 END) send_cnt,SUM(CASE WHEN direction = 'send' THEN length END) send_bytefrom dcc_ne_log where log_time >=trunc(sysdate) ---- between trunc(sysdate) and sysdate GROUP BY peer_id) select distinct ne_state.peer_id peer_name, to_char(ne_state.ne_state) peer_state, (case when ne_state.ne_state = 0 then to_char(0) else NVL((select '1' from dcc_sys_log where peer_id = ne_state.peer_id and action = 'active' and rownum=1),'0') end) peer_active, decode(ne_state.ne_state,0,'0',nvl(dnlt.ERR_CNT,0)) ERR_CNT, ---注意NVL改造decode(ne_state.ne_state,0,'0',nvl(dnlt.recv_cnt,0)) recv_cnt, decode(ne_state.ne_state,0,'0',nvl(dnlt.recv_byte,0)) recv_byte,decode(ne_state.ne_state,0,'0',nvl(dnlt.send_cnt,0)) send_cnt, decode(ne_state.ne_state,0,'0',nvl(dnlt.send_byte,0)) send_byte from ne_state ,dcc_ne_log_time dnlt where ne_state.peer_id=dnlt.peer_id(+)第4次改造后的SQL執行計劃 SELECT STATEMENT, GOAL = ALL_ROWS 34231 7 791 COUNT STOPKEY TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_SYS_LOG 3816 1 35 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 596 119421 HASH UNIQUE 34231 7 791 HASH JOIN OUTER 34230 7 791 VIEW IDEPTEST 34222 7 189 HASH UNIQUE 34222 7 336 COUNT FILTER HASH JOIN RIGHT OUTER 33949 4896255 235020240 VIEW IDEPTEST 28574 10 240 HASH UNIQUE 28574 10 1160 HASH JOIN 28573 1561 181076 HASH JOIN 
28486 3874 368030 VIEW SYS VW_SQ_1 14365 41 1353 HASH GROUP BY 14365 41 2624 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14092 4895849 313334336 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14107 2462339 152665018 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 86 85428 1793988 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 5347 4896255 117510120 VIEW IDEPTEST 8 7 602 HASH GROUP BY 8 7 280 TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 7 108 4320 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 2 108 ---第5次改造如下部分(即我改造后語句的第一個WITH的地方),覺的非常怪異外面用 action和cause,里面在查最大時間用的是log_type = '對端交互'根據我的經驗常識來看,需求應該是log_type = '對端交互'同時作用于內外,即找出log_type = '對端交互'的最近一條記錄,然后看它的action和cause是否滿足要求,難道不是這樣嗎,這樣才順暢嘛。 select distinct dsl1.peer_id peer_id, nvl(ne_disconnect_info.ne_state, 1) ne_state from dcc_sys_log dsl1, (select distinct dnl.peer_id peer_id, decode(action, 'disconnect', 0, 'connect', 0, 1) ne_state from dcc_sys_log dsl, dcc_ne_log dnl where dsl.peer_id = dnl.peer_id and ((dsl.action = 'disconnect' and dsl.cause = '關閉對端') or (dsl.action = 'connect' and dsl.cause = '連接主機失敗')) and dsl.log_time = (select max(log_time) from dcc_sys_log where peer_id = dnl.peer_id and log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+) and rownum>=0如果我猜測的是對的,代碼就應該改寫為如下(增加and log_type = '對端交互' ):把數據限于log_type = '對端交互'的那些。如果有哪個peer_id不存在 log_type = '對端交互',那么這個peer_id不在新寫法中出現。之前的舊寫法包括所有。select distinct dsl1.peer_id peer_id, nvl(ne_disconnect_info.ne_state, 1) ne_state from dcc_sys_log dsl1, (select distinct dnl.peer_id peer_id, decode(action, 'disconnect', 0, 'connect', 0, 1) ne_state from dcc_sys_log dsl, dcc_ne_log dnl where dsl.peer_id = dnl.peer_id and ((dsl.action = 'disconnect' and dsl.cause = '關閉對端') or (dsl.action = 'connect' and dsl.cause = '連接主機失敗')) and log_type = '對端交互' and dsl.log_time = (select max(log_time) from dcc_sys_log where peer_id = dnl.peer_id and log_type = '對端交互')) ne_disconnect_infowhere dsl1.peer_id = ne_disconnect_info.peer_id(+) and rownum>=0以下改寫可以完善取最大日期的記錄的方法,可進一步減少掃描次數SELECT a.peer_id, CASE WHEN dnl.peer_id IS NOT NULL AND str IN ('disconnect關閉對端','connect連接主機失敗') THEN '0' ELSE '1' END ne_state FROM (SELECT peer_id,MIN(action||cause) KEEP(DENSE_RANK LAST ORDER BY log_time) str FROM dcc_sys_log dslWHERE log_type = '對端交互'GROUP BY peer_id ) a,(SELECT DISTINCT peer_id FROM dcc_ne_log) dnl WHERE a.peer_id = dnl.peer_id(+)經過5次改造后,最終完善優化版代碼如下:with ne_state as (SELECT a.peer_id, CASE WHEN dnl.peer_id IS NOT NULL AND str IN ('disconnect關閉對端','connect連接主機失敗') THEN '0' ELSE '1' END ne_state FROM (SELECT peer_id,MIN(action||cause) KEEP(DENSE_RANK LAST ORDER BY log_time) str FROM dcc_sys_log dslWHERE log_type = '對端交互'GROUP BY peer_id ) a,(SELECT DISTINCT peer_id FROM dcc_ne_log) dnl WHERE a.peer_id = dnl.peer_id(+)), dcc_ne_log_time as (select peer_id,COUNT(CASE WHEN RESULT <> 1 THEN 1 END) err_cnt,COUNT(CASE WHEN direction = 'recv' THEN 1 END) recv_cnt,SUM(CASE WHEN direction = 'recv' THEN length END) recv_byte,COUNT(CASE WHEN direction = 'send' THEN 1 END) send_cnt,SUM(CASE WHEN direction = 'send' THEN length END) send_bytefrom dcc_ne_log where log_time >=trunc(sysdate) ---- between trunc(sysdate) and sysdate GROUP BY peer_id) select distinct ne_state.peer_id peer_name, to_char(ne_state.ne_state) peer_state, (case when ne_state.ne_state = 0 then to_char(0) else NVL((select '1' from dcc_sys_log where peer_id = ne_state.peer_id and action = 'active' and rownum=1),'0') end) peer_active, decode(ne_state.ne_state,0,'0',nvl(dnlt.ERR_CNT,0)) ERR_CNT, 
---注意NVL改造decode(ne_state.ne_state,0,'0',nvl(dnlt.recv_cnt,0)) recv_cnt, decode(ne_state.ne_state,0,'0',nvl(dnlt.recv_byte,0)) recv_byte,decode(ne_state.ne_state,0,'0',nvl(dnlt.send_cnt,0)) send_cnt, decode(ne_state.ne_state,0,'0',nvl(dnlt.send_byte,0)) send_byte from ne_state ,dcc_ne_log_time dnlt where ne_state.peer_id=dnlt.peer_id(+)執行計劃: SELECT STATEMENT, GOAL = ALL_ROWS 14880 71 19667 COUNT STOPKEY TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_SYS_LOG 3235 1 35 INDEX RANGE SCAN IDEPTEST IDX_DCC_SYS_LOG_PEER 505 100809 HASH UNIQUE 14880 71 19667 HASH JOIN OUTER 14878 19100 5290700 HASH JOIN RIGHT OUTER 14657 49 12446 VIEW IDEPTEST 65 10 880 HASH GROUP BY 65 10 430 TABLE ACCESS BY INDEX ROWID IDEPTEST DCC_NE_LOG 64 1388 59684 INDEX RANGE SCAN IDEPTEST IDX_DCC_NE_LOG_TIME 7 1388 VIEW IDEPTEST 14592 49 8134 SORT GROUP BY 14592 49 3479 TABLE ACCESS FULL IDEPTEST DCC_SYS_LOG 14316 4939225 350684975 INDEX FAST FULL SCAN IDEPTEST IDX_DCC_NE_LOG_PEER 220 176424 4057752GROUP BY 的合并
select decode(so.sFileName, 'SNP_20', 'SNP', 'HNIC_2', 'HNIC', 'IBRC_2', 'IBRC', 'IISMP_', 'IISMP', 'NIC_20', 'NIC', 'NIG_20', 'NIG', 'IIC_20', 'IIC', 'HIIC_2', 'HIIC', 'CA.D.A', 'CA.D.ATSR', 'ULH_20', 'ULH', 'IBRST_', 'IBRST', so.sFileName) 業務名稱,so.sFileCount 合并前文件個數,so.sRecordNum 合并前總記錄數,ta.tFileCount 合并后文件個數,ta.tRecordNum 合并后總記錄數,NVL(so1.sFileCount, 0) 合并前當天文件個數,NVL(so1.sRecordNum, 0) 合并前當天文件總記錄數,NVL(so2.sFileCount, 0) 合并前昨天文件個數,NVL(so2.sRecordNum, 0) 合并前昨天文件總記錄數 from (select substr(a.file_name, 1, 6) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNumfrom (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_id and trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd')) agroup by substr(a.file_name, 1, 6)order by substr(a.file_name, 1, 6)) so LEFT JOIN (select substr(a.file_name, 1, 6) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNumfrom (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_idand trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd')AND FILE_NAME LIKE '%20100805%') agroup by substr(a.file_name, 1, 6)order by substr(a.file_name, 1, 6)) so1 ON (so.sFileName = so1.sFileName) LEFT JOIN (select substr(a.file_name, 1, 6) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNumfrom (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_idand trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd')AND FILE_NAME not LIKE '%20100805%') agroup by substr(a.file_name, 1, 6)order by substr(a.file_name, 1, 6)) so2 ON (so.sFileName = so2.sFileName) LEFT JOIN (select substr(a.file_name, 1, 6) tFileName,count(*) tFileCount,sum(record_num) tRecordNumfrom (select distinct ipsf.file_name, ipsf.record_numfrom idep_plugin_send_filelist ipsfwhere trunc(ipsf.create_time) = to_date('2010-08-05', 'yyyy-mm-dd')and remark = '處理成功') agroup by substr(a.file_name, 1, 6)order by substr(a.file_name, 1, 6)) taON (so.sFileName = ta.tFileName)where so.sFileName not like 'MVI%' union select so.sFileName,so.sFileCount,(so.sRecordNum - (so.sFileCount * 2)) sRecordNum,ta.tFileCount,ta.tRecordNum,NVL(so1.sFileCount, 0),(nvl(so1.sRecordNum, 0) - (nvl(so1.sFileCount, 0) * 2)),NVL(so2.sFileCount, 0),(nvl(so2.sRecordNum, 0) - (nvl(so2.sFileCount, 0) * 2)) from (select substr(a.file_name, 1, 3) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNumfrom (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_idand trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd')) agroup by substr(a.file_name, 1, 3)order by substr(a.file_name, 1, 3)) so LEFT JOIN (select substr(a.file_name, 1, 3) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNumfrom (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_idand trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd')AND FILE_NAME LIKE 'MVI100805%') agroup by substr(a.file_name, 1, 3)order by substr(a.file_name, 1, 3)) so1 ON (so.sFileName = so1.sFileName) LEFT JOIN (select substr(a.file_name, 1, 3) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNumfrom 
(select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_idand trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd')AND FILE_NAME not LIKE 'MVI100805%') agroup by substr(a.file_name, 1, 3)order by substr(a.file_name, 1, 3)) so2 ON (so.sFileName = so2.sFileName) LEFT JOIN (select substr(a.file_name, 1, 3) tFileName,count(*) tFileCount,sum(record_num) tRecordNumfrom (select distinct ipsf.file_name, ipsf.record_numfrom idep_plugin_send_filelist ipsfwhere trunc(ipsf.create_time) = to_date('2010-08-05', 'yyyy-mm-dd')and remark = '處理成功') agroup by substr(a.file_name, 1, 3)order by substr(a.file_name, 1, 3)) ta ON (so.sFileName = ta.tFileName) WHERE so.sFileName = 'MVI' ---這里有錯吧,感覺應該是 WHERE so.sFileName like 'MVI%',確實沒錯,因為這里是截取過的了注意點:1.將trunc(ipar.relation_time) = to_date('2010-08-05', 'yyyy-mm-dd') 等等類似之處改寫為如下,要避免對列進行運算,這樣會 導致用不上索引,除非是建立了函數索引。 ipsf.create_time >= to_date('2010-08-05', 'yyyy-mm-dd') and ipsf.create_time < to_date('2010-08-05', 'yyyy-mm-dd')+12. 確保IDEP_PLUGIN_AUTO_RELATION的relation_time有索引確保idep_plugin_send_filelist的create_time列有索引3.可通過CASE WHEN 語句進一步減少表掃描次數,如(count(CASE WHEN FILE_NAME LIKE '%20100805%' THEN 1 END) sFileCount1), 類似如上的修改,可以等價改寫,將本應用的表掃描從8次減少為4次。代碼改寫如下: select decode(so.sFileName, 'SNP_20', 'SNP', 'HNIC_2', 'HNIC', 'IBRC_2', 'IBRC', 'IISMP_', 'IISMP', 'NIC_20', 'NIC', 'NIG_20', 'NIG', 'IIC_20', 'IIC', 'HIIC_2', 'HIIC', 'CA.D.A', 'CA.D.ATSR', 'ULH_20', 'ULH', 'IBRST_', 'IBRST', so.sFileName) 業務名稱,so.sFileCount 合并前文件個數,so.sRecordNum 合并前總記錄數,ta.tFileCount 合并后文件個數,ta.tRecordNum 合并后總記錄數,NVL(so.sFileCount1, 0) 合并前當天文件個數,NVL(so.sRecordNum1, 0) 合并前當天文件總記錄數,NVL(so.sFileCount2, 0) 合并前昨天文件個數,NVL(so.sRecordNum2, 0) 合并前昨天文件總記錄數 from (select substr(a.file_name, 1, 6) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNum,count(CASE WHEN FILE_NAME LIKE '%20100805%' THEN 1 END) sFileCount1,sum(CASE WHEN FILE_NAME LIKE '%20100805%' THEN sRecordNum END) sRecordNum1,count(CASE WHEN FILE_NAME NOT LIKE '%20100805%' THEN 1 END) sFileCount2,sum(CASE WHEN FILE_NAME NOT LIKE '%20100805%' THEN sRecordNum END) sRecordNum2from (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_id and ipar.relation_time >= to_date('2010-08-05', 'yyyy-mm-dd')and ipar.relation_time < to_date('2010-08-05', 'yyyy-mm-dd')+1) agroup by substr(a.file_name, 1, 6)order by substr(a.file_name, 1, 6)) so LEFT JOIN (select substr(a.file_name, 1, 6) tFileName,count(*) tFileCount,sum(record_num) tRecordNumfrom (select distinct ipsf.file_name, ipsf.record_numfrom idep_plugin_send_filelist ipsfwhere ipsf.create_time >= to_date('2010-08-05', 'yyyy-mm-dd')and ipsf.create_time < to_date('2010-08-05', 'yyyy-mm-dd')+1and remark = '處理成功') agroup by substr(a.file_name, 1, 6)order by substr(a.file_name, 1, 6)) taON (so.sFileName = ta.tFileName)where so.sFileName not like 'MVI%' unionselect so.sFileName,so.sFileCount,(so.sRecordNum - (so.sFileCount * 2)) sRecordNum,ta.tFileCount,ta.tRecordNum,NVL(so.sFileCount1, 0),(nvl(so.sRecordNum1, 0) - (nvl(so.sFileCount1, 0) * 2)),NVL(so.sFileCount2, 0),(nvl(so.sRecordNum2, 0) - (nvl(so.sFileCount2, 0) * 2)) from (select substr(a.file_name, 1, 3) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNum,count(CASE WHEN FILE_NAME LIKE 'MVI100805%' THEN 1 END) sFileCount1,sum(CASE WHEN FILE_NAME LIKE 'MVI100805%' THEN sRecordNum END) sRecordNum1,count(CASE WHEN 
FILE_NAME NOT LIKE 'MVI100805%' THEN 1 END) sFileCount2,sum(CASE WHEN FILE_NAME NOT LIKE 'MVI100805%' THEN sRecordNum END) sRecordNum2from (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_idand ipar.relation_time >= to_date('2010-08-05', 'yyyy-mm-dd')and ipar.relation_time < to_date('2010-08-05', 'yyyy-mm-dd')+1) agroup by substr(a.file_name, 1, 3)order by substr(a.file_name, 1, 3)) so LEFT JOIN (select substr(a.file_name, 1, 3) tFileName,count(*) tFileCount,sum(record_num) tRecordNumfrom (select distinct ipsf.file_name, ipsf.record_numfrom idep_plugin_send_filelist ipsfwhere ipsf.create_time >= to_date('2010-08-05', 'yyyy-mm-dd')and ipsf.create_time < to_date('2010-08-05', 'yyyy-mm-dd')+1and remark = '處理成功') agroup by substr(a.file_name, 1, 3)order by substr(a.file_name, 1, 3)) ta ON (so.sFileName = ta.tFileName) WHERE so.sFileName = 'MVI'最終版 select decode(so.sFileName, 'SNP_20', 'SNP', 'HNIC_2', 'HNIC', 'IBRC_2', 'IBRC', 'IISMP_', 'IISMP', 'NIC_20', 'NIC', 'NIG_20', 'NIG', 'IIC_20', 'IIC', 'HIIC_2', 'HIIC', 'CA.D.A', 'CA.D.ATSR', 'ULH_20', 'ULH', 'IBRST_', 'IBRST', so.sFileName) 業務名稱,so.sFileCount 合并前文件個數,case when so.sfilename like 'MVI%' then (so.sRecordNum - (so.sFileCount * 2)) else so.sRecordNum end 合并前總記錄數,ta.tFileCount 合并后文件個數,ta.tRecordNum 合并后總記錄數,NVL(so.sFileCount1, 0) 合并前當天文件個數,case when so.sfilename like 'MVI%' then (nvl(so.sRecordNum1, 0) - (nvl(so.sFileCount1, 0) * 2)) else NVL(so.sRecordNum1, 0) end 合并前當天文件總記錄數,NVL(so.sFileCount2, 0) 合并前昨天文件個數,case when so.sfilename like 'MVI%' then (nvl(so.sRecordNum2, 0) - (nvl(so.sFileCount2, 0) * 2)) else NVL(so.sRecordNum2, 0) end 合并前昨天文件總記錄數 from (select substr(a.file_name, 1, CASE WHEN a.file_name like 'MVI%' THEN 3 ELSE 6 END) sfileName,count(*) sFileCount,sum(sRecordNum) sRecordNum,count(CASE WHEN (FILE_NAME LIKE '%20100805%' AND FILE_NAME not like 'MVI%') OR (FILE_NAME LIKE 'MVI100805%' AND FILE_NAME like 'MVI%') THEN 1 END) sFileCount1,sum (CASE WHEN (FILE_NAME LIKE '%20100805%' AND FILE_NAME not like 'MVI%') OR (FILE_NAME LIKE 'MVI100805%' AND FILE_NAME like 'MVI%') THEN sRecordNum END) sRecordNum1,count(CASE WHEN (FILE_NAME NOT LIKE '%20100805%' AND FILE_NAME not like 'MVI%') OR (FILE_NAME NOT LIKE 'MVI100805%' AND FILE_NAME like 'MVI%') THEN 1 END) sFileCount2,sum (CASE WHEN (FILE_NAME NOT LIKE '%20100805%' AND FILE_NAME not like 'MVI%') OR (FILE_NAME NOT LIKE 'MVI100805%' AND FILE_NAME like 'MVI%') THEN sRecordNum END) sRecordNum2from (select distinct bsf.file_name,bsf.record_num sRecordNum,bsf.create_timefrom BUSINESS_SEND_FILELIST bsf, IDEP_PLUGIN_AUTO_RELATION iparwhere bsf.file_id = ipar.bus_file_id and ipar.relation_time >= to_date('2010-08-05', 'yyyy-mm-dd')and ipar.relation_time < to_date('2010-08-05', 'yyyy-mm-dd')+1) agroup by substr(a.file_name, 1, CASE WHEN a.file_name like 'MVI%' THEN 3 ELSE 6 END)order by substr(a.file_name, 1, CASE WHEN a.file_name like 'MVI%' THEN 3 ELSE 6 END)) so LEFT JOIN (select substr(a.file_name, 1, CASE WHEN a.file_name like 'MVI%' THEN 3 ELSE 6 END) tFileName,count(*) tFileCount,sum(record_num) tRecordNumfrom (select distinct ipsf.file_name, ipsf.record_numfrom idep_plugin_send_filelist ipsfwhere ipsf.create_time >= to_date('2010-08-05', 'yyyy-mm-dd')and ipsf.create_time < to_date('2010-08-05', 'yyyy-mm-dd')+1and remark = '處理成功') agroup by substr(a.file_name, 1, CASE WHEN a.file_name like 'MVI%' THEN 3 ELSE 6 END)order by substr(a.file_name, 1, CASE WHEN a.file_name 
like 'MVI%' THEN 3 ELSE 6 END)) ta ON (so.sFileName = ta.tFileName)

A SQL statement that makes you doubt your own eyes
IDEP的SQL語句。 該語句存在性能問題,執行非常緩慢,極耗CPU,為了實現行列轉換的需求,具體如下: select distinct to_char(a.svcctx_id),to_char(0),to_char(nvl((select peer_idfrom dcc_ne_configwhere peer_name = a.peer),0)),to_char(a.priority),to_char(nvl((select peer_idfrom dcc_ne_configwhere peer_name = b.peer),0)),to_char(b.priority),to_char(nvl((select peer_idfrom dcc_ne_configwhere peer_name = c.peer),0)),to_char(c.priority)from (select hopbyhop,svcctx_id,substr(cause,instr(cause, 'Host = ') + 7,instr(cause, 'Priority = ') - instr(cause, 'Host = ') - 11) peer,substr(cause,instr(cause, 'Priority = ') + 11,instr(cause, 'reachable = ') -instr(cause, 'Priority = ') - 13) priorityfrom dcc_sys_logwhere cause like '%SC路由應答%'and hopbyhop in (select distinct hopbyhop from dcc_sys_log)) a,(select hopbyhop,svcctx_id,substr(cause,instr(cause, 'Host = ') + 7,instr(cause, 'Priority = ') - instr(cause, 'Host = ') - 11) peer,substr(cause,instr(cause, 'Priority = ') + 11,instr(cause, 'reachable = ') -instr(cause, 'Priority = ') - 13) priorityfrom dcc_sys_logwhere cause like '%SC路由應答%'and hopbyhop in (select distinct hopbyhop from dcc_sys_log)) b,(select hopbyhop,svcctx_id,substr(cause,instr(cause, 'Host = ') + 7,instr(cause, 'Priority = ') - instr(cause, 'Host = ') - 11) peer,substr(cause,instr(cause, 'Priority = ') + 11,instr(cause, 'reachable = ') -instr(cause, 'Priority = ') - 13) priorityfrom dcc_sys_logwhere cause like '%SC路由應答%'and hopbyhop in (select distinct hopbyhop from dcc_sys_log)) cwhere a.hopbyhop = b.hopbyhopand a.hopbyhop = c.hopbyhopand a.peer <> b.peerand a.peer <> c.peerand b.peer <> c.peerand a.priority <> b.priorityand a.priority <> c.priorityand b.priority <> c.priority執行計劃:Execution Plan ---------------------------------------------------------- Plan hash value: 408096778-------------------------------------------------------------------------------------------------- | Id | Operation | Name | Rows | Bytes |TempSpc| Cost (%CPU)| Time | -------------------------------------------------------------------------------------------------- | 0 | SELECT STATEMENT | | 1941 | 159K| | 18E(100)|999:59:59 | |* 1 | TABLE ACCESS FULL | DCC_NE_CONFIG | 1 | 28 | | 3 (0)| 00:00:01 | |* 2 | TABLE ACCESS FULL | DCC_NE_CONFIG | 1 | 28 | | 3 (0)| 00:00:01 | |* 3 | TABLE ACCESS FULL | DCC_NE_CONFIG | 1 | 28 | | 3 (0)| 00:00:01 | | 4 | HASH UNIQUE | | 1941 | 159K| | 18E(100)|999:59:59 | | 5 | MERGE JOIN | | 18E| 15E| | 18E(100)|999:59:59 | | 6 | MERGE JOIN | | 18E| 15E| | 32P(100)|999:59:59 | | 7 | MERGE JOIN | | 1147T| 79P| | 1018T(100)|999:59:59 | | 8 | SORT JOIN | | 746T| 36P| 85P| 101G (95)|999:59:59 | |* 9 | HASH JOIN | | 746T| 36P| 70M| 4143M(100)|999:59:59 | | 10 | TABLE ACCESS FULL | DCC_SYS_LOG | 4939K| 14M| | 14325 (1)| 00:02:52 | |* 11 | HASH JOIN | | 151M| 7530M| 8448K| 366K (93)| 01:13:19 | |* 12 | TABLE ACCESS FULL| DCC_SYS_LOG | 246K| 5547K| | 14352 (2)| 00:02:53 | |* 13 | TABLE ACCESS FULL| DCC_SYS_LOG | 246K| 6994K| | 14352 (2)| 00:02:53 | |* 14 | FILTER | | | | | | | |* 15 | SORT JOIN | | 246K| 5547K| 15M| 16046 (2)| 00:03:13 | |* 16 | TABLE ACCESS FULL | DCC_SYS_LOG | 246K| 5547K| | 14352 (2)| 00:02:53 | |* 17 | SORT JOIN | | 4939K| 14M| 113M| 27667 (2)| 00:05:32 | | 18 | TABLE ACCESS FULL | DCC_SYS_LOG | 4939K| 14M| | 14325 (1)| 00:02:52 | |* 19 | SORT JOIN | | 4939K| 14M| 113M| 27667 (2)| 00:05:32 | | 20 | TABLE ACCESS FULL | DCC_SYS_LOG | 4939K| 14M| | 14325 (1)| 00:02:52 | --------------------------------------------------------------------------------------------------Predicate Information (identified by operation id): 
---------------------------------------------------1 - filter("PEER_NAME"=SUBSTR(:B1,INSTR(:B2,'Host = ')+7,INSTR(:B3,'Priority =')-INSTR(:B4,'Host = ')-11))2 - filter("PEER_NAME"=SUBSTR(:B1,INSTR(:B2,'Host = ')+7,INSTR(:B3,'Priority =')-INSTR(:B4,'Host = ')-11))3 - filter("PEER_NAME"=SUBSTR(:B1,INSTR(:B2,'Host = ')+7,INSTR(:B3,'Priority =')-INSTR(:B4,'Host = ')-11))9 - access("HOPBYHOP"="HOPBYHOP")11 - access("HOPBYHOP"="HOPBYHOP")filter(SUBSTR("CAUSE",INSTR("CAUSE",'Host = ')+7,INSTR("CAUSE",'Priority =')-INSTR("CAUSE",'Host = ')-11)<>SUBSTR("CAUSE",INSTR("CAUSE",'Host =')+7,INSTR("CAUSE",'Priority = ')-INSTR("CAUSE",'Host = ')-11) ANDSUBSTR("CAUSE",INSTR("CAUSE",'Priority = ')+11,INSTR("CAUSE",'reachable =')-INSTR("CAUSE",'Priority = ')-13)<>SUBSTR("CAUSE",INSTR("CAUSE",'Priority =')+11,INSTR("CAUSE",'reachable = ')-INSTR("CAUSE",'Priority = ')-13))12 - filter("CAUSE" LIKE '%SC路由應答%')13 - filter("CAUSE" LIKE '%SC路由應答%')14 - filter(SUBSTR("CAUSE",INSTR("CAUSE",'Host = ')+7,INSTR("CAUSE",'Priority =')-INSTR("CAUSE",'Host = ')-11)<>SUBSTR("CAUSE",INSTR("CAUSE",'Host =')+7,INSTR("CAUSE",'Priority = ')-INSTR("CAUSE",'Host = ')-11) ANDSUBSTR("CAUSE",INSTR("CAUSE",'Host = ')+7,INSTR("CAUSE",'Priority =')-INSTR("CAUSE",'Host = ')-11)<>SUBSTR("CAUSE",INSTR("CAUSE",'Host =')+7,INSTR("CAUSE",'Priority = ')-INSTR("CAUSE",'Host = ')-11) ANDSUBSTR("CAUSE",INSTR("CAUSE",'Priority = ')+11,INSTR("CAUSE",'reachable =')-INSTR("CAUSE",'Priority = ')-13)<>SUBSTR("CAUSE",INSTR("CAUSE",'Priority =')+11,INSTR("CAUSE",'reachable = ')-INSTR("CAUSE",'Priority = ')-13) ANDSUBSTR("CAUSE",INSTR("CAUSE",'Priority = ')+11,INSTR("CAUSE",'reachable =')-INSTR("CAUSE",'Priority = ')-13)<>SUBSTR("CAUSE",INSTR("CAUSE",'Priority =')+11,INSTR("CAUSE",'reachable = ')-INSTR("CAUSE",'Priority = ')-13))15 - access("HOPBYHOP"="HOPBYHOP")filter("HOPBYHOP"="HOPBYHOP")16 - filter("CAUSE" LIKE '%SC路由應答%')17 - access("HOPBYHOP"="HOPBYHOP")filter("HOPBYHOP"="HOPBYHOP")19 - access("HOPBYHOP"="HOPBYHOP")filter("HOPBYHOP"="HOPBYHOP")Statistics ----------------------------------------------------------54 recursive calls0 db block gets128904 consistent gets5549 physical reads0 redo size1017 bytes sent via SQL*Net to client1422 bytes received via SQL*Net from client1 SQL*Net roundtrips to/from client1 sorts (memory)0 sorts (disk)0 rows processed ------ with t as (select hopbyhop,svcctx_id,substr(cause,instr(cause, 'Host = ') + 7,instr(cause, 'Priority = ') - instr(cause, 'Host = ') - 11) peer,substr(cause,instr(cause, 'Priority = ') + 11,instr(cause, 'reachable = ') -instr(cause, 'Priority = ') - 13)selet * from t1,t2, t where t....?
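The WITH-clause fragment above trails off, but the direction it hints at can be completed as follows. Treat this as a hedged sketch only, untested against the real schema: the identical inline view that the original statement builds three times is factored out once, and the three copies become three references to it, so the '%SC路由應答%' rows of dcc_sys_log are extracted a single time instead of three. The redundant "hopbyhop in (select distinct hopbyhop from dcc_sys_log)" filter is dropped here on the assumption that it adds nothing, since every qualifying row already comes from dcc_sys_log and NULL hopbyhop values cannot survive the equi-joins anyway.

with t as
 (select hopbyhop,
         svcctx_id,
         substr(cause,
                instr(cause, 'Host = ') + 7,
                instr(cause, 'Priority = ') - instr(cause, 'Host = ') - 11) peer,
         substr(cause,
                instr(cause, 'Priority = ') + 11,
                instr(cause, 'reachable = ') - instr(cause, 'Priority = ') - 13) priority
    from dcc_sys_log
   where cause like '%SC路由應答%')
select distinct to_char(a.svcctx_id),
                to_char(0),
                to_char(nvl((select peer_id from dcc_ne_config where peer_name = a.peer), 0)),
                to_char(a.priority),
                to_char(nvl((select peer_id from dcc_ne_config where peer_name = b.peer), 0)),
                to_char(b.priority),
                to_char(nvl((select peer_id from dcc_ne_config where peer_name = c.peer), 0)),
                to_char(c.priority)
  from t a, t b, t c
 where a.hopbyhop = b.hopbyhop
   and a.hopbyhop = c.hopbyhop
   and a.peer     <> b.peer
   and a.peer     <> c.peer
   and b.peer     <> c.peer
   and a.priority <> b.priority
   and a.priority <> c.priority
   and b.priority <> c.priority;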
Summary