Wednesday 13 December 2023

Script to generate table DDL from metadata (read-only / read-write status)

#!/bin/bash
# Generate ALTER TABLE read write / read only scripts for the given instance.
# Usage: ./r_tables.sh <ORACLE_SID>

export ORACLE_SID="$1"

export ORACLE_HOME=/u01/db/oracle19c/oraHome_1
export PATH=$PATH:$ORACLE_HOME/bin
export LD_LIBRARY_PATH=/u01/db/oracle19c/oraHome_1/lib

# Run r_tables.sql as SYSDBA and append the sqlplus output to r_tables.log.
$ORACLE_HOME/bin/sqlplus /nolog << End_SQL >> r_tables.log

connect / as sysdba

SET head OFF
SET feed OFF
SET trimspool ON
SET linesize 32767
SET pagesize 32767
SET echo OFF
SET termout OFF
SET verify OFF

@r_tables.sql

exit

End_SQL
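
The wrapper expects the instance name as its only argument. A minimal sketch of running it, assuming it is saved as r_tables.sh (the file name and the SID ORCL below are placeholders, not from the original notes):

chmod +x r_tables.sh
./r_tables.sh ORCL        # sqlplus output is appended to r_tables.log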


+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

more r_tables.sql

-- r_tables.sql: capture the current read-only tables so they can be
-- switched to read write for maintenance and set back to read only afterwards.
spool readwrite_tables.sql
select 'alter table ' || owner || '.' || table_name || ' read write;' from dba_tables where read_only='YES';
spool off

spool readonly_tables.sql
select 'alter table ' || owner || '.' || table_name || ' read only;' from dba_tables where read_only='YES';
spool off
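
The two spooled files are meant to be applied around the maintenance window. An illustrative sqlplus sequence (the ordering is an assumption about the intended workflow):

SQL> @readwrite_tables.sql      -- open the read-only tables for write
-- ... perform the maintenance ...
SQL> @readonly_tables.sql       -- restore the tables to read only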

Create the public synonyms in the target database.

1. Prepare the script to create the public synonyms in the target database.

cat header.txt > 13_create_public_synonym.sql

cat 13_b_get_public_synonym.pomdir01.sql.out | grep "CREATE PUBLIC SYNONYM" | grep -v SELECT >> 13_create_public_synonym.sql

echo "quit" >> 13_create_public_synonym.sql



Huge page script

 


awk '/Hugepagesize:/{p=$2} / 0 /{next} / kB$/{v[sprintf("%9d GB %-s",int($2/1024/1024),$0)]=$2;next} {h[$0]=$2} /HugePages_Total/{hpt=$2} /HugePages_Free/{hpf=$2} {h["HugePages Used (Total-Free)"]=hpt-hpf} END{for(k in v) print sprintf("%-60s %10d",k,v[k]/p); for (k in h) print sprintf("%9d GB %-s",p*h[k]/1024/1024,k)}' /proc/meminfo|sort -nr|grep --color=auto -iE "^|( HugePage)[^:]*"
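
The one-liner above formats /proc/meminfo values in GB and derives a "HugePages Used (Total-Free)" figure. For a quick raw check of just the hugepage counters, a plain grep is enough:

grep -i huge /proc/meminfo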

Tuesday 12 December 2023

Analyze a particular SQL ID and see the trends for the past day

set pages 50000 lines 32767

-- AWR trend for one SQL_ID over the last day: executions, buffer gets,
-- disk reads, IO wait, CPU time and elapsed time per snapshot interval.
select
  s.snap_id,
  to_char(s.begin_interval_time,'HH24:MI') c1,
  sql.executions_delta   c2,
  sql.buffer_gets_delta  c3,
  sql.disk_reads_delta   c4,
  sql.iowait_delta       c5,
  sql.cpu_time_delta     c6,
  sql.elapsed_time_delta c7
from
  dba_hist_sqlstat  sql,
  dba_hist_snapshot s
where
  s.snap_id = sql.snap_id
  and s.begin_interval_time > sysdate - 1
  and sql.sql_id = '&sqlid'
order by c7
/
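
When the query is run from sqlplus it prompts for the sqlid substitution variable. A hypothetical run (the script name and SQL_ID are placeholders):

SQL> @sql_trend.sql
Enter value for sqlid: 1a2b3c4d5e6f7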


Check whether the same SQL ID has multiple plan hash values; if it does, a plan change may be causing the poor performance.

set pages 50000 lines 32767

-- Summarise executions, rows processed, CPU minutes and elapsed minutes
-- per PLAN_HASH_VALUE for the given SQL_ID.
select
  SQL_ID
, PLAN_HASH_VALUE
, sum(EXECUTIONS_DELTA) EXECUTIONS
, sum(ROWS_PROCESSED_DELTA) CROWS
, trunc(sum(CPU_TIME_DELTA)/1000000/60) CPU_MINS
, trunc(sum(ELAPSED_TIME_DELTA)/1000000/60) ELA_MINS
from DBA_HIST_SQLSTAT
where SQL_ID in ('&sqlid')
group by SQL_ID, PLAN_HASH_VALUE
order by SQL_ID, CPU_MINS
/
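
If the query shows more than one plan hash value, the individual execution plans can be pulled from AWR for comparison. A minimal sketch using DBMS_XPLAN.DISPLAY_AWR (the plan hash value is supplied here as a second substitution variable):

select * from table(dbms_xplan.display_awr('&sqlid', &plan_hash_value, null, 'TYPICAL'));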
