Thursday, December 22, 2011

Find and Delete duplicate check Constraints

--------------------------------------------------------------------------------
--------------------------------------------------------------------------------

-- Find duplicate check constraints in current user's schema that have Oracle
-- generated names (like 'SYS_C%'). A script (c:\drop_duplicate_constraints.sql)
-- is generated to drop those duplicates.
--
-- This removes those duplicated constraints that were probably created by
-- improper usage of the Oracle imp utility. Using CONSTRAINTS=N will not
-- create duplicate constraints when importing into an existing table.
--
-- Tables js_constraints and js_constraints_min are dropped and created in
-- the current user's schema; drop them afterwards to clean up.
--
-- WARNING: Review this and the generated script
-- c:\drop_duplicate_constraints.sql before running it at YOUR RISK.
-- I do not accept any responsibility for what YOU DO TO YOUR DATABASES !
--
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
SET LINESIZE 120
SET PAGESIZE 50000
SET TIMING ON

DROP TABLE js_constraints
/
-- Staging table: one row per Oracle-generated check constraint in the schema.
CREATE TABLE js_constraints
(
    owner            VARCHAR2(30),
    constraint_name  VARCHAR2(30),
    table_name       VARCHAR2(30),
    search_condition VARCHAR2(2000)
)
/
declare
    -- All Oracle-generated ('SYS_C%') check constraints owned by the current user.
    cursor js_cursor is
        select owner,
               table_name,
               constraint_name,
               search_condition
        from user_constraints
        where owner = user
          and constraint_type = 'C'
          and constraint_name like 'SYS_C%';

    commit_interval   constant pls_integer := 10000;
    records_processed pls_integer := 0;
begin
    -- Cursor FOR loop: opens, fetches and closes the cursor implicitly, so an
    -- unexpected exception cannot leak an open cursor (the original explicit
    -- open/fetch/close would).
    for js_rec in js_cursor loop
        insert into js_constraints (
            owner,
            table_name,
            constraint_name,
            search_condition)
        values (
            js_rec.owner,
            js_rec.table_name,
            js_rec.constraint_name,
            -- search_condition is a LONG; keep only what fits the staging column
            substr(js_rec.search_condition, 1, 2000));

        -- Commit in batches to keep undo usage bounded on very large schemas.
        records_processed := records_processed + 1;
        if records_processed = commit_interval then
            commit;
            records_processed := 0;
        end if;
    end loop;
    commit;
end;
/
-- Index supports the correlated lookup in the UPDATE and the IN-subquery DELETE below.
create index js_constraints_x1 on js_constraints (owner, table_name, search_condition)
/
drop table js_constraints_min
/
-- One row per (owner, table, search_condition) group: the "survivor" of each
-- duplicate set.  translate() replaces the letters S, Y, C and '_' with '9',
-- so MIN() over the translated names picks the lowest generated SYS_C name
-- in each group as the constraint to keep.  constraint_name is created with a
-- 32-character dummy literal only to size the column; the UPDATE below fills it.
create table js_constraints_min as
select
owner,
table_name,
search_condition,
min(translate(constraint_name, 'SYC_', '9999')) as min_constraint_name,
'at least 30 dummy characters !!!' as constraint_name
from js_constraints
group by owner, table_name, search_condition
/
-- Resolve the translated minimum back to the real constraint name.
update js_constraints_min cm
set constraint_name = (select constraint_name from js_constraints c
where c.owner = cm.owner
and c.table_name = cm.table_name
and translate(c.constraint_name, 'SYC_', '9999') = cm.min_constraint_name)
/
-- Remove the survivors from the staging table; the rows that remain are the
-- duplicates that the generated script will drop.
delete from js_constraints
where (owner, table_name, search_condition, translate(constraint_name, 'SYC_', '9999')) in
(select owner, table_name, search_condition, min_constraint_name
from js_constraints_min)
/


-- Generate the drop script promised in the header.  SPOOL must be active,
-- otherwise the "spool off" below has nothing to close and no file is written
-- (the original left this line commented out).
spool c:\js_drop_duplicate_constraints.sql
select
'alter table ' || c.owner || '.' || c.table_name ||
' drop constraint ' || c.constraint_name ||
-- chr(10) (LF, not the original bare-CR chr(13)) ends the ALTER line and its
-- trailing comment so the '/' that executes it lands on its own line.
' --duplicate of ' || cm.constraint_name || chr(10) || '/'
from js_constraints c, js_constraints_min cm
where c.owner = cm.owner
and c.table_name = cm.table_name
and c.search_condition = cm.search_condition
order by c.owner, c.table_name, cm.min_constraint_name, c.constraint_name
/
spool off

Oracle Connection taking long time to establish / tnsping taking too long

1) Check listener.log size
At 10g, D:\oracle\product\10.2.0\db_1\NETWORK\log
At 11g, D:\app\Administrator\diag\tnslsnr\apex11-PC\listener\trace

2) If listener.log size is in GBs then
a) Stop listener services
b) Rename listener.log
c) Start listener services


lsnrctl>show

lsnrctl>set Log_status off

rename Listener.log to listenerold.log (from the command prompt)

lsnrctl>set Log_Status on

it will start listener.log automatically



3) Tnsping



ping
tracert
tnsping

Running SQL query


------------old

set heading off

-- Text of statements currently run by LDBO sessions from ld.exe,
-- newest first, one v$sqltext piece per row.
select aa
from (
    select distinct
        st.sql_id,
        st.piece,
        st.sql_text aa,
        sa.first_load_time
    from v$sqltext st
    inner join v$sqlarea sa
        on st.sql_id = sa.sql_id
    inner join v$session se
        on  se.command        = sa.command_type
        and se.username       = sa.parsing_schema_name
        and se.sql_hash_value = sa.hash_value
        and se.sql_address    = sa.address
    where se.username is not null
      and sa.parsing_schema_name = 'LDBO'
      and se.module = 'ld.exe'
    order by first_load_time desc, sql_id, piece
);




-------------------sqlplus

set heading off

-- Same report, but returns the full statement text (SQL_FULLTEXT is a CLOB,
-- hence the to_char for sqlplus display).
select aa
from (
    select
        t.sql_id,
        t.piece,
        to_char(a.sql_fulltext) aa,
        a.first_load_time
    from v$sqltext t
    inner join v$sqlarea a
        on t.sql_id = a.sql_id
    inner join v$session s
        on  s.command        = a.command_type
        and s.username       = a.parsing_schema_name
        and s.sql_hash_value = a.hash_value
        and s.sql_address    = a.address
    where s.username is not null
      and a.parsing_schema_name = 'LDBO'
      and s.module = 'ld.exe'
    order by first_load_time desc, sql_id, piece
);



------------full sqltext--------at plsql---- sqldeveloper-----

-- Full-text variant for PL/SQL Developer / SQL Developer.
SELECT aa
FROM (
    SELECT
        txt.sql_id,
        txt.piece,
        TO_CHAR(area.sql_fulltext) aa,
        area.first_load_time
    FROM v$sqltext txt
    INNER JOIN v$sqlarea area
        ON txt.sql_id = area.sql_id
    INNER JOIN v$session sess
        ON  sess.command        = area.command_type
        AND sess.username       = area.parsing_schema_name
        AND sess.sql_hash_value = area.hash_value
        AND sess.sql_address    = area.address
    WHERE sess.username IS NOT NULL
      AND area.parsing_schema_name = 'LDBO'
      AND sess.module = 'ld.exe'
    ORDER BY first_load_time DESC, sql_id, piece
);


----------------------------sqltext with operation-----------

-- Statement text joined to its execution-plan rows (one row per plan step).
SELECT
    sa.sql_fulltext,
    pl.id,
    pl.parent_id,
    pl.operation,
    pl.object_name
FROM v$sqlarea sa
INNER JOIN v$sql_plan pl
    ON  pl.address         = sa.address
    AND pl.hash_value      = sa.hash_value
    AND pl.plan_hash_value = sa.plan_hash_value
INNER JOIN v$session se
    ON  se.username       = sa.parsing_schema_name
    AND se.sql_hash_value = sa.hash_value
WHERE sa.parsing_schema_name = 'LDBO'
  AND se.module = 'ld.exe'
ORDER BY sa.plan_hash_value, pl.id;



Microsoft ODBC driver for Oracle on 64 bit Machine

oracle(tm) client and networking components were not found. these components are supplied by oracle corporation and are part of the oracle version 7.2 (or greater) client software installation

You will be unable to use this driver until these components have been installed.


The Microsoft ODBC Driver can be installed under 64-bit Windows but 64-bit applications cannot access MS ODBC driver because it comes only in 32-bit version. For 32-bit applications under 64-bit Windows there's ODBC Data Source Administrator for the 32-bit ODBC drivers %systemdrive%\Windows\SysWoW64\odbcad32.exe (usually C:\WINDOWS\SysWOW64\odbcad32.exe).


They put the 32 bit odbcad32.exe in the syswow64 directory. They put the 64 bit odbcad32.exe in the system32 directory. 32 bit apps will pick up the 32 bit registry setting and 64 bit will pick up the 64 bit registry setting. system32 comes before syswow64 in the system path so the 64bit software runs before the 32 bit software.


Install Oracle Server 10.2.0.4

Solution: also install Oracle client version 10.2.0.3 or above to get the required driver



Table Defragmentation / Table Reorganization / Table Rebuilding

Tables in an Oracle database become fragmented after mass deletion, or after many delete and/or insert operations. If you are running a 24×7 database, you don't have the option of reorganizing (or defragmenting) a table by the traditional export/truncate/import method, i.e., exporting data from the affected table, truncating the table, then importing the data back into the table.
There is an "alter table table_name move" command that you can use to defragment tables.
Note: This method does not apply to tables with 'LONG' columns.



--------detecting chained row-----

SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;


List Chained Rows

Creating a CHAINED_ROWS Table

@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlchain.sql will create following table


-- Target table for ANALYZE ... LIST CHAINED ROWS INTO chained_rows
-- (structure matches $ORACLE_HOME/rdbms/admin/utlchain.sql).
CREATE TABLE chained_rows (
    owner_name        VARCHAR2(30),
    table_name        VARCHAR2(30),
    cluster_name      VARCHAR2(30),
    partition_name    VARCHAR2(30),
    subpartition_name VARCHAR2(30),
    head_rowid        ROWID,
    analyze_timestamp DATE
);


SELECT owner_name,table_name, head_rowid FROM chained_rows;


-------------------


-- Rows per data block for a table.  The original FROM clause was missing its
-- table name (broken SQL); restored from the identical query later in these
-- notes -- substitute the table you are analyzing for row_mig_chain_demo.
SELECT dbms_rowid.rowid_block_number(rowid) "Block-Nr", count(*) "Rows"
FROM row_mig_chain_demo
GROUP BY dbms_rowid.rowid_block_number(rowid) order by 1;


SELECT chain_cnt,
round(chain_cnt/num_rows*100,2) pct_chained,
avg_row_len, pct_free , pct_used
FROM user_tables
WHERE TABLE_NAME IN (
SELECT distinct table_name FROM CHAINED_ROWS);



If the table includes LOB column(s), this statement can be used to move the table along with LOB data and LOB index segments (associated with this table)

which the user explicitly specifies. If not specified, the default is to not move the LOB data and LOB index segments.




---------------------------Detect all Tables with Chained and Migrated Rows------------------------


1) Analyze all or only your Tables


SELECT 'ANALYZE TABLE '||table_name||' LIST CHAINED ROWS INTO CHAINED_ROWS;' FROM user_tables;


Analyze only chained rows tables

SELECT owner, table_name, chain_cnt FROM dba_tables WHERE owner='LDBO' and chain_cnt > 0;


set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\chained_statistics.sql;

SELECT 'ANALYZE TABLE ' ||table_name||' LIST CHAINED ROWS INTO CHAINED_ROWS;'
FROM dba_tables WHERE owner='LDBO' and chain_cnt > 0;

spool off



2) Alter Table ......Move

set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\defrag.sql;

-- Fixed: the original SELECT ended with a dangling '||' and no MOVE clause
-- (invalid SQL); emit the full ALTER TABLE ... MOVE statement with the same
-- storage clause used elsewhere in these notes.
SELECT DISTINCT 'ALTER TABLE ' ||table_name||' MOVE PCTFREE 20 PCTUSED 40 STORAGE (INITIAL 20K NEXT 40K MINEXTENTS 2 MAXEXTENTS 20 PCTINCREASE 0);' FROM CHAINED_ROWS;
spool off


or

select sum(bytes/1024/1024) "FOR INITIAL VALUE OR MORE"
from dba_segments
where owner = 'LDBO'
and segment_name = 'TBLOPTIONACCESSHISTORY';


SELECT DISTINCT 'ALTER TABLE ' ||table_name||' MOVE PCTFREE 20 PCTUSED 40 STORAGE (INITIAL 20K NEXT 40K MINEXTENTS 2 MAXEXTENTS 20 PCTINCREASE 0);' FROM

CHAINED_ROWS;


3) Rebuild Indexes because these tables’s indexes are in unstable state.

connect deltek/xxx@fin;
set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\rebuild_index.sql;

SELECT 'ALTER INDEX ' ||INDEX_NAME||' REBUILD;' FROM DBA_INDEXES WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);
spool off

4) Analyze Tables for compute statistics after defragmentation

set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\compute_stat.sql;

SELECT 'ANALYZE TABLE '||table_name||' COMPUTE STATISTICS;' FROM user_tables WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);

spool off


5) Show the RowIDs for all chained rows

This will allow you to quickly see how much of a problem chaining is in each table. If chaining is prevalent in a table, then that table should be rebuilt with a higher value for PCTFREE.

SELECT owner_name,
table_name,
count(head_rowid) row_count
FROM chained_rows
GROUP BY owner_name,table_name
/



6) SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;

SELECT dbms_rowid.rowid_block_number(rowid) "Block-Nr", count(*) "Rows"
FROM row_mig_chain_demo
GROUP BY dbms_rowid.rowid_block_number(rowid) order by 1;


delete FROM chained_rows;

Wednesday, December 21, 2011

Change Snapshot Setting

select * from dba_hist_wr_control;


BEGIN
    -- AWR retention 66240 minutes = 46 days; snapshot interval 15 minutes.
    DBMS_WORKLOAD_REPOSITORY.modify_snapshot_settings(
        retention => 66240,
        interval  => 15);
END;
/

Tuesday, December 20, 2011

Detect Row Chaining, Migrated Row and Avoid it

--------detecting chained row-----

This query will show how many chained (and migrated) rows each table has:

SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;


-------------------

SELECT a.name, b.value
FROM v$statname a, v$mystat b
WHERE a.statistic# = b.statistic#
AND lower(a.name) = 'table fetch continued row';

----------------------------------------------------------------------------------------

SELECT 'Chained or Migrated Rows = '||value FROM v$sysstat WHERE name = 'table fetch continued row';

Result:
Chained or Migrated Rows = 31637

Explain:
You could have 1 table with 1 chained row that was fetched 31'637 times. You could have 31'637 tables, each with a chained row, each of which was fetched once. You could have any combination of the above -- any combo.

---------------------------------------------------------------------------------

How many Rows in a Table are chained?

ANALYZE TABLE row_mig_chain_demo COMPUTE STATISTICS;

SELECT chain_cnt,
round(chain_cnt/num_rows*100,2) pct_chained,
avg_row_len, pct_free , pct_used
FROM user_tables
WHERE table_name = 'ROW_MIG_CHAIN_DEMO';

CHAIN_CNT PCT_CHAINED AVG_ROW_LEN PCT_FREE PCT_USED
---------- ----------- ----------- ---------- ----------
3 100 3691 10 40

PCT_CHAINED shows 100% which means all rows are chained or migrated.

------------------------------------------------------------------------------------------------


List Chained Rows

You can look at the chained and migrated rows of a table using the ANALYZE statement with the LIST CHAINED ROWS clause. The results of this statement are stored in a specified table created explicitly to accept the information returned by the LIST CHAINED ROWS clause. These results are useful in determining whether you have enough room for updates to rows.

Creating a CHAINED_ROWS Table

To create the table to accept data returned by an ANALYZE ... LIST CHAINED ROWS statement, execute the UTLCHAIN.SQL or UTLCHN1.SQL script in $ORACLE_HOME/rdbms/admin. These scripts are provided by the database. They create a table named CHAINED_ROWS in the schema of the user submitting the script.


D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlchain.sql will create following table


-- Target table for ANALYZE ... LIST CHAINED ROWS INTO chained_rows;
-- structure created by $ORACLE_HOME/rdbms/admin/utlchain.sql.
create table CHAINED_ROWS (
owner_name varchar2(30),
table_name varchar2(30),
cluster_name varchar2(30),
partition_name varchar2(30),
subpartition_name varchar2(30),
head_rowid rowid, -- rowid of the head piece of each chained/migrated row
analyze_timestamp date
);

After a CHAINED_ROWS table is created, you specify it in the INTO clause of the ANALYZE statement.

ANALYZE TABLE row_mig_chain_demo LIST CHAINED ROWS;

SELECT owner_name,table_name, head_rowid FROM chained_rows;


-----------------------------How to avoid Chained and Migrated Rows?--------------------------


Increasing PCTFREE can help to avoid migrated rows. If you leave more free space available in the block, then the row has room to grow. You can also reorganize or re-create tables and indexes that have high deletion rates. If tables frequently have rows deleted, then data blocks can have partially free space in them. If rows are inserted and later expanded, then the inserted rows might land in blocks with deleted rows but still not have enough room to expand. Reorganizing the table ensures that the main free space is totally empty blocks.

The ALTER TABLE ... MOVE statement enables you to relocate data of a nonpartitioned table or of a partition of a partitioned table into a new segment, and optionally into a different tablespace for which you have quota. This statement also lets you modify any of the storage attributes of the table or partition, including those which cannot be modified using ALTER TABLE. You can also use the ALTER TABLE ... MOVE statement with the COMPRESS keyword to store the new segment using table compression.

ALTER TABLE MOVE

First count the number of Rows per Block before the ALTER TABLE MOVE

SELECT dbms_rowid.rowid_block_number(rowid) "Block-Nr", count(*) "Rows"
FROM row_mig_chain_demo
GROUP BY dbms_rowid.rowid_block_number(rowid) order by 1;

Block-Nr Rows
---------- ----------
2066 3

Now, de-chain the table, the ALTER TABLE MOVE rebuilds the row_mig_chain_demo table in a new segment, specifying new storage parameters:

SELECT distinct table_name FROM CHAINED_ROWS;


ALTER TABLE tbloptionaccesshistory MOVE
PCTFREE 20
PCTUSED 40
STORAGE (INITIAL 20K
NEXT 40K
MINEXTENTS 2
MAXEXTENTS 20
PCTINCREASE 0);

Table altered.

Again count the number of Rows per Block after the ALTER TABLE MOVE

SELECT dbms_rowid.rowid_block_number(rowid) "Block-Nr", count(*) "Rows"
FROM tbloptionaccesshistory
GROUP BY dbms_rowid.rowid_block_number(rowid) order by 1;


Rebuild the Indexes for the Table

Moving a table changes the rowids of the rows in the table. This causes indexes on the table to be marked UNUSABLE, and DML accessing the table using these indexes will receive an ORA-01502 error. The indexes on the table must be dropped or rebuilt. Likewise, any statistics for the table become invalid and new statistics should be collected after moving the table.

ANALYZE TABLE row_mig_chain_demo COMPUTE STATISTICS;

ERROR at line 1:
ORA-01502: index 'SCOTT.SYS_C003228' or partition of such index is in unusable
state


This is the primary key of the table which must be rebuilt.

ALTER INDEX SYS_C003228 REBUILD;
Index altered.

------------

SELECT 'ALTER INDEX ' ||INDEX_NAME||' REBUILD;' FROM DBA_INDEXES WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);


-------------

ANALYZE TABLE row_mig_chain_demo COMPUTE STATISTICS;
Table analyzed.


---------------------


SELECT 'ANALYZE TABLE '||table_name||' COMPUTE STATISTICS;' FROM user_tables WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);



------------------------



SELECT chain_cnt,
round(chain_cnt/num_rows*100,2) pct_chained,
avg_row_len, pct_free , pct_used
FROM user_tables
WHERE TABLE_NAME IN (
SELECT distinct table_name FROM CHAINED_ROWS);



CHAIN_CNT PCT_CHAINED AVG_ROW_LEN PCT_FREE PCT_USED
---------- ----------- ----------- ---------- ----------

If the table includes LOB column(s), this statement can be used to move the table along with LOB data and LOB index segments (associated with this table) which the user explicitly specifies. If not specified, the default is to not move the LOB data and LOB index segments.




---------------

SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;

-----------------


---------------------------Detect all Tables with Chained and Migrated Rows------------------------


1) Analyse all or only your Tables

SELECT 'ANALYZE TABLE '||table_name||' LIST CHAINED ROWS INTO CHAINED_ROWS;'
FROM user_tables
/



SELECT owner, table_name, chain_cnt FROM dba_tables WHERE owner='LDBO' and chain_cnt > 0;

SELECT 'ANALYZE TABLE ' ||table_name||' LIST CHAINED ROWS INTO CHAINED_ROWS;'
FROM dba_tables WHERE owner='LDBO' and chain_cnt > 0
/


SELECT distinct table_name FROM CHAINED_ROWS;

2) Alter Table ......Move

SELECT DISTINCT 'ALTER TABLE ' ||table_name||' MOVE PCTFREE 20 PCTUSED 40 STORAGE (INITIAL 20K NEXT 40K MINEXTENTS 2 MAXEXTENTS 20 PCTINCREASE 0);' FROM CHAINED_ROWS;


3) Rebuild Indexes
SELECT 'ALTER INDEX ' ||INDEX_NAME||' REBUILD;' FROM DBA_INDEXES WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);


4) Analyze Tables
SELECT 'ANALYZE TABLE '||table_name||' COMPUTE STATISTICS;' FROM user_tables WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);

5) Show the RowIDs for all chained rows

This will allow you to quickly see how much of a problem chaining is in each table. If chaining is prevalent in a table, then that table should be rebuilt with a higher value for PCTFREE.

SELECT owner_name,
table_name,
count(head_rowid) row_count
FROM chained_rows
GROUP BY owner_name,table_name
/



6) SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;




Conclusion

Migrated rows affect OLTP systems which use indexed reads to read singleton rows. In the worst case, you can add an extra I/O to all reads which would be really bad. Truly chained rows affect index reads and full table scans.

Row migration is typically caused by UPDATE operation

Row chaining is typically caused by INSERT operation.

SQL statements which are creating/querying these chained/migrated rows will degrade the performance due to more I/O work.

To diagnose chained/migrated rows use ANALYZE command , query V$SYSSTAT view

To remove chained/migrated rows use higher PCTFREE using ALTER TABLE MOVE.

Saturday, December 17, 2011

Move segments from one Tablespace to another

Move Tables of user PROD_USER like this:
Tables + indexes of tables EMP,PRODUCTS,CUSTOMERS into tablespace TBS1.
All the other tables + indexes of this user into tablespace TBS2.



set serveroutput on

--***********************************************
-- (Run the script as DBA user)
-- Parameters:
---------------
-- user_name : owner to which to move segments
-- TBS1 : Tablespace-A
-- Tables_TBS1 : list of tables to move to tablespace-A
-- TBS2 : tablespace to move all tables NOT in the list
-- put_Output : if 'true' - create output of operations (dbms_output)
-- put_Execute : if 'true' - execute the move operations
--***********************************************
declare
User_Name varchar2(20) default 'PROD_USER';
TBS1 varchar2(20) default 'TBS1';
Tables_TBS1 varchar2(1000) default 'EMP,PRODUCTS,CUSTOMERS';
TBS2 varchar2(20) default 'TBS2';
put_Output boolean default true;
put_Execute boolean default true;
Sort_memory number default 10000000;
TBS varchar2(20);
begin
-- Normalize the list to ',NAME,' form (upper-cased) so instr() below does
-- exact-name matches rather than substring matches.
Tables_TBS1 := upper(','||Tables_TBS1||',');
-- Larger sort area speeds up the DISTINCT scans over dba_segments.
execute immediate 'alter session set sort_area_size = '||to_char(Sort_memory);
-- Every table-type segment (including partitions/subpartitions) owned by the user.
for crs in (select distinct s.owner, s.segment_name, s.partition_name, s.tablespace_name, s.segment_type from dba_segments s where owner like User_Name and segment_type in ('TABLE','TABLE PARTITION','TABLE SUBPARTITION')) loop
-- Target tablespace: TBS1 for listed tables, TBS2 for everything else.
if instr(Tables_TBS1,','||crs.segment_name||',') != 0 then
TBS := TBS1;
else
TBS := TBS2;
end if;
if crs.tablespace_name = TBS then
--------------------------------------------------
-- Table is already in the correct tablespace.
-- check only indexes.
--------------------------------------------------
for crs2 in (select distinct s.owner, s.segment_name, s.partition_name, s.tablespace_name, s.segment_type from dba_indexes i, dba_segments s
where i.table_owner=crs.owner and i.table_name = crs.segment_name and s.segment_type in ('INDEX','INDEX PARTITION','INDEX SUBPARTITION')
and s.owner = i.owner and s.segment_name = i.index_name and (s.partition_name = crs.partition_name or s.partition_name is null and crs.partition_name is null)) loop
-- Recomputed from the owning table's name (same result as the outer check).
if instr(Tables_TBS1,','||crs.segment_name||',') != 0 then
TBS := TBS1;
else
TBS := TBS2;
end if;
-- Rebuild only indexes that are not already in the target tablespace.
if crs2.tablespace_name != TBS then
if crs2.segment_type in ('INDEX PARTITION') then
if put_Output then dbms_output.put_line ('> INDEX PARTITION '||crs2.owner||'.'||crs2.segment_name||':'||crs2.partition_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter index '||crs2.owner||'.'||crs2.segment_name||' rebuild partition '||crs2.partition_name ||' tablespace '||TBS; end if;
elsif crs2.segment_type in ('INDEX SUBPARTITION') then
if put_Output then dbms_output.put_line ('> INDEX SUBPARTITION '||crs2.owner||'.'||crs2.segment_name||':'||crs2.partition_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter index '||crs2.owner||'.'||crs2.segment_name||' rebuild subpartition '||crs2.partition_name ||' tablespace '||TBS; end if;
elsif crs2.segment_type = 'INDEX' then
if put_Output then dbms_output.put_line ('> INDEX '||crs2.owner||'.'||crs2.segment_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter index '||crs2.owner||'.'||crs2.segment_name||' rebuild tablespace '||TBS; end if;
end if;
end if;
end loop;
else
--------------------------------------------------
-- Move Table AND all rebuild ALL the indexes.
--------------------------------------------------
if crs.segment_type in ('TABLE PARTITION') then
if put_Output then dbms_output.put_line ('TABLE PARTITION '||crs.owner||'.'||crs.segment_name||':'||crs.partition_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter table '||crs.owner||'.'||crs.segment_name||' move partition '||crs.partition_name ||' tablespace '||TBS; end if;
elsif crs.segment_type in ('TABLE SUBPARTITION') then
if put_Output then dbms_output.put_line ('TABLE SUBPARTITION '||crs.owner||'.'||crs.segment_name||':'||crs.partition_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter table '||crs.owner||'.'||crs.segment_name||' move subpartition '||crs.partition_name ||' tablespace '||TBS; end if;
elsif crs.segment_type = 'TABLE' then
if put_Output then dbms_output.put_line ('TABLE '||crs.owner||'.'||crs.segment_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter table '||crs.owner||'.'||crs.segment_name||' move tablespace '||TBS; end if;
end if;
-- The table moved, so every dependent index is now UNUSABLE; rebuild them all
-- in the target tablespace.
for crs2 in (select distinct s.owner, s.segment_name, s.partition_name, s.tablespace_name, s.segment_type from dba_indexes i, dba_segments s
where i.table_owner=crs.owner and i.table_name = crs.segment_name and s.segment_type in ('INDEX','INDEX PARTITION','INDEX SUBPARTITION')
and s.owner = i.owner and s.segment_name = i.index_name and (s.partition_name = crs.partition_name or s.partition_name is null and crs.partition_name is null)) loop
if crs2.segment_type in ('INDEX PARTITION') then
if put_Output then dbms_output.put_line ('> INDEX PARTITION '||crs2.owner||'.'||crs2.segment_name||':'||crs2.partition_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter index '||crs2.owner||'.'||crs2.segment_name||' rebuild partition '||crs2.partition_name ||' tablespace '||TBS; end if;
elsif crs2.segment_type in ('INDEX SUBPARTITION') then
if put_Output then dbms_output.put_line ('> INDEX SUBPARTITION '||crs2.owner||'.'||crs2.segment_name||':'||crs2.partition_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter index '||crs2.owner||'.'||crs2.segment_name||' rebuild subpartition '||crs2.partition_name ||' tablespace '||TBS; end if;
elsif crs2.segment_type = 'INDEX' then
if put_Output then dbms_output.put_line ('> INDEX '||crs2.owner||'.'||crs2.segment_name||' -> '||TBS); end if;
if put_Execute then execute immediate 'alter index '||crs2.owner||'.'||crs2.segment_name||' rebuild tablespace '||TBS; end if;
end if;
end loop;
end if;
end loop;
end;
/

Thursday, December 15, 2011

EMAIL NOTIFICATION changes in init.ora parameters


Auditing changes to init.ora parameters (via pfile or spfile) is an important DBA task. Sometimes, users which have “alter system” privilege can make unauthorized changes to the initialization parameters in the spfile on a production database. Hence, auditing changes to parameters is a critical DBA task. Fortunately, it's quite simple to audit these changes by implementing the audit_sys_operations=true.

Here is a method to track changes to the initialization parameters. In order to track all changes to parameters we can use audit for the alter system statement for any specific user.

We should follow below steps to track changes to init.ora parameters:

1. ALTER SYSTEM SET audit_trail=db SCOPE=SPFILE;
2. SHUTDOWN IMMEDIATE
3. STARTUP
4. CREATE USER TEST IDENTIFIED BY TEST;
5. GRANT DBA TO TEST;
6. AUDIT ALTER SYSTEM BY test;
7. CONN TEST/TEST
8. ALTER SYSTEM SET AUDIT_TRAIL=db SCOPE=SPFILE;

9. Create an alert script to notify the DBA when a parameter has changed.

Let's start by finding the action_name in the dba_audit_trail view for the alter system command:

SQL> select username, timestamp, action_name from dba_audit_trail;

USERNAME TIMESTAMP ACTION_NAME
------------------------------ ------------- ----------------------------
TEST 29-MAY-09 ALTER SYSTEM


STEP 1
- We can track changes made by SYS user by setting audit_sys_operations parameter to TRUE.

SQL> alter system set audit_sys_operations=true scope=spfile;
System altered.

STEP 2 - Next, we bounce the instance to make the change take effect:
SQL> shutdown immediate
Database closed.
Database dismounted.
ORACLE instance shut down.

SQL> startup
ORACLE instance started.
Total System Global Area 285212672 bytes
Fixed Size 1218992 bytes
Variable Size 92276304 bytes
Database Buffers 188743680 bytes
Redo Buffers 2973696 bytes
Database mounted.
Database opened.

Here we see our auditing parameters:
SQL> show parameter audit
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
audit_file_dest string /home/oracle/oracle/product/10 .2.0/db_1/admin/fkhalid/adump
audit_sys_operations boolean TRUE
audit_syslog_level string
audit_trail string DB

SQL> alter system set audit_trail=db scope=spfile;
System altered.

STEP 3 - Here we go to the adump directory and examine the audit files:
SQL> host
[oracle@localhost bin]$ cd /home/oracle/oracle/product/10.2.0/db_1/admin/kam/adump/

[oracle@localhost adump]$ ls
ora_5449.aud ora_5476.aud ora_5477.aud ora_5548.aud ora_5575.aud ora_5576.aud

[oracle@localhost adump]$ cat ora_5576.aud
Audit file /home/oracle/oracle/product/10.2.0/db_1/admin/kam/adump/ora_5576.aud
Oracle Database 10g Enterprise Edition Release 10.2.0.1.0 - Production
With the Partitioning, OLAP and Data Mining options
ORACLE_HOME = /home/oracle/oracle/product/10.2.0/db_1/
System name: Linux
Node name: localhost.localdomain
Release: 2.6.18-92.el5
Version: #1 SMP Tue Jun 10 18:49:47 EDT 2008
Machine: i686
Instance name: kam
Redo thread mounted by this instance: 1
Oracle process number: 15
Unix process pid: 5576, image: oracle@localhost.localdomain (TNS V1-V3)
Fri May 29 02:38:30 2009
ACTION : 'alter system set audit_trail=db scope=spfile'
DATABASE USER: '/'
PRIVILEGE : SYSDBA
CLIENT USER: oracle
CLIENT TERMINAL: pts/2
STATUS: 0

STEP 4 - Now, create a crontab job to seek new entries in the adump directory.
#******************************************************
# List the full names of all audit files written in the
# last day.  Fixed: audit files in adump use the .aud
# extension (see the directory listing above), not .trc.
#******************************************************
rm -f /tmp/audit_list.lst
find $DBA/$ORACLE_SID/adump/*.aud -mtime -1 -print >> /tmp/audit_list.lst
STEP 5 - When found, send the DBA an e-mail:
# If an initialization parameter has changed, send an e-mail.
# Fixed: the original was missing the space before ']' and had a duplicated
# 'then' line, both of which are shell syntax errors.
if [ -f /tmp/audit_list.lst ]
then
   # Now, be sure that we don't clog the mailbox.
   # The following check looks for existing mail and only
   # sends mail when the mailbox is empty . . .
   if [ ! -s /var/spool/mail/oramy_sid ]
   then
      cat /oracle/MY_SID/scripts/oracheck.log | mail oramy_sid
   fi
   # sendmail . . .   (site-specific notification goes here; the original bare
   # "sendmail . . ." placeholder would fail if executed as-is)
fi

Please beware that using the auditing command imposes additional work on the production database.

How to fix - ORA-12514

This simple two part procedure will help to diagnose and fix the most common sqlnet and tnsnames configuration problems.

1. Test communication between the client and the listener

We will use tnsping to complete this step. It's a common misconception that tnsping tests connectivity to the instance. In actual fact, it only tests connectivity to the listener.

Here, we will use it to prove that a) the tnsnames.ora has the correct hostname and port, and b) that there is a listener listening on the specified host and port. Run tnsping:

tnsping If it is successful you will see something like this:

oracle@bloo$ tnsping scr9

Used TNSNAMES adapter to resolve the alias
Attempting to contact (DESCRIPTION = (ADDRESS_LIST = (ADDRESS =
(PROTOCOL = TCP) (HOST = bloo)(PORT = 1521))) (CONNECT_DATA =
(SERVER = DEDICATED) (SERVICE_NAME = scr9)))
OK (40 msec)
If not, here are some common errors, and some suggestions for fixing them:

TNS-03505: Failed to resolve name
The specified database name was not found in the tnsnames.ora, onames or ldap. This means that tnsping hasn't even got as far as trying to make contact with a server - it simply can't find any record of the database that you are trying to tnsping. Make sure that you've spelled the database name correctly, and that it has an entry in the tnsnames.ora.

If you have a sqlnet.ora, look at for the setting NAMES.DEFAULT_DOMAIN. If it is set, then all entries in your tnsnames.ora must have a matching domain suffix.

TNS-12545: Connect failed because target host or object does not exist
The host specified in the tnsnames is not contactable. Verify that you have spelled the host name correctly. If you have, try pinging the host with 'ping '. If ping returns 'unknown host', speak to your network admin. It might be that you have a DNS issue (you could try using the IP address if you have it to hand). If you get 'host unreachable', again speak to your network person, the problem could be down to a routing or firewall issue.

TNS-12541: TNS:no listener
The hostname was valid but the listener was not contactable. Things to check are that the tnsnames has the correct port (and hostname) specified, and that the listener is running on the server and using the correct port.

tnsping hangs for a long time
I've seen this happen in situations where there is something listening on the host/port - but it isn't an oracle listener. Make sure you have specified the correct port, and that your listener is running. If all looks ok, try doing a 'netstat -ap | grep 1521' (or whatever port you are using) to find out what program is listening on that port.

2. Attempt a connection to the instance
Once you have proven that the tnsnames is talking to the listener properly, the next step is to attempt a full connection to the instance. To do this we'll use sqlplus:

sqlplus [username]/[password]@[tns_alias]
If it works you will successfully log into the instance. If not, here are some common errors:

ORA-01017: invalid username/password; logon denied
This is actually a good error in these circumstances! Even though you didn't use the correct username or password, you must have successfully made contact with the instance.

ORA-12505: TNS:listener does not currently know of SID given in connect
Either the SID is misspelled in the tnsnames, or the listener isn't listening for it. Check the tnsnames.ora first. If it looks ok, do a 'lsnrctl status' on your server, to see what databases the listener is listening for.

ORA-12514: TNS:listener could not resolve SERVICE_NAME given in connect
This is quite a common error and it means that, while the listener was contactable, the database (or rather the service) specified in the tnsnames wasn't one of the things that it was listening out for.
Begin by looking at your tnsnames.ora. In it, you will see a setting like SERVICE_NAME=<service_name>.

If you are running a single instance database (ie. not RAC), and you are sure that you are not using services, it might be easier to change SERVICE_NAME= to SID= in your tnsnames. Using service names is the more modern way of doing things, and it does have benefits, but SID still works perfectly well (for now anyway).
If you would prefer to continue using service names, you must first check that you have not misspelled the service name in your tnsnames. If it looks alright, next check that the listener is listening for the service. Do this by running 'lsnrctl services' on your server. If there isn't an entry for your service, you need to make sure that the service_names parameter is set correctly on the database.


Wednesday, December 14, 2011

Missing ArchiveLog at Standby server

Issue : Archive log gap at Standby end. A standby was created from cold backup of Production Database.

SOLUTION: go for an incremental backup from the current scn at my standby till the production’s scn.

The following solution can be useful when there is a gap of some archive logs at the standby which were physically removed/deleted at the production end, and were never applied to the standby database.


SQL> select max(sequence#) from v$archived_log;

SQL> SELECT CURRENT_SCN FROM V$DATABASE;

SQL>SELECT to_char(CURRENT_SCN) FROM V$DATABASE;





SQLPLUSW SYS/ORACLE@ORCLSBY AS SYSDBA
SQLSBY>SHUT IMMEDIATE
SQLSBY>startup nomount;
SQLSBY>alter database mount standby database;
SQLSBY>recover managed standby database disconnect from session;


STILL ERROR IN RECOVERY THEN USE INCREMENTAL BACKUP


SQLSBY>select max(sequence#) from v$log_history;




SQLSBY>recover managed standby database cancel;
ORA-16136: Managed Standby Recovery not active


SQLSBY>ALTER DATABASE RECOVER managed standby database cancel


confirm from the view v$managed_standby to see if the MRP(managed recovery process) is running or not
SQLSBY>select process from v$managed_standby;

PROCESS
---------
ARCH
ARCH
RFS (remote file server)

SQLSBY>recover standby database;

ORA-00279: change 25738819980 generated at 04/23/2011 16:13:24 needed for thread 1
ORA-00289: suggestion : C:\DBDR\ARCHIVE_DBDR\ARC_0741355170_00311_001
ORA-00280: change 25738819980 for thread 1 is in sequence #311


SQLSBY>SELECT SEQUENCE#, APPLIED FROM V$ARCHIVED_LOG;

SEQUENCE# APP
----------- -----
311 NO
312 NO
313 NO
314 NO



SQLSBY>SELECT CURRENT_SCN FROM V$DATABASE;

SQLSBY>SELECT to_char(CURRENT_SCN) FROM V$DATABASE;
TO_CHAR(CURRENT_SCN)
----------------------------------------
25738819979



RMAN Incremental backup from SCN of standby database

Taking the incremental backup of production database from the current scn (25738819979) at standby database and applying the same at standby end.


RMAN> BACKUP INCREMENTAL FROM SCN 25738819979 DATABASE FORMAT 'C:\DBDR\DBDR_%U' tag 'Archive_Gap';


Now applying the incremental backup to standby database. Catalog the backuppiece with standby.

RMAN> CATALOG BACKUPPIECE 'C:\DBDR\DBDR_0DMAO5OR_1_1';

RMAN> RECOVER DATABASE NOREDO;

RMAN> DELETE BACKUP TAG 'Archive_Gap';


SQLSBY> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;

SQLSBY> select process from v$managed_standby;

PROCESS
---------
ARCH
ARCH
RFS
MRP0

SQLSBY> SELECT SEQUENCE#, APPLIED FROM V$ARCHIVED_LOG;

SEQUENCE# APP
---------- ---
311 YES
312 YES
313 YES
314 YES


We can see that the standby is now in sync with the production.




SQL>alter system switch logfile;


SQL>select max(sequence#) from v$archived_log;



SQLSBY>select max(sequence#) from v$log_history;

SQLSBY>SELECT SEQUENCE#, APPLIED FROM V$ARCHIVED_LOG;

Switchover and Failover steps

SWITCH OVER
1. SELECT SWITCHOVER_STATUS FROM V$DATABASE;
SWITCHOVER_STATUS
-----------------
TO STANDBY
1 row selected

2. ALTER DATABASE COMMIT TO SWITCHOVER TO PHYSICAL STANDBY;

3. SHUTDOWN IMMEDIATE;
STARTUP MOUNT;

4. SELECT SWITCHOVER_STATUS FROM V$DATABASE;

SWITCHOVER_STATUS
------------
TO_PRIMARY


5. at standby ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY;


6. alter database open [if db opened read only since last time it was started]
else shutdown and restart

7. ALTER SYSTEM SWITCH LOGFILE;



FAILOVER

First resolve gap:

A) Identify and resolve any gaps in the archived redo log files.
SQL> SELECT THREAD#, LOW_SEQUENCE#, HIGH_SEQUENCE# FROM V$ARCHIVE_GAP;
THREAD# LOW_SEQUENCE# HIGH_SEQUENCE#
---------- ------------- --------------
1 90 92


ALTER DATABASE REGISTER PHYSICAL LOGFILE 'filespec1';

B) Repeat A) until all gaps are resolved.


C) Copy any other missing archived redo log files.

SQL> SELECT UNIQUE THREAD# AS THREAD, MAX(SEQUENCE#)
2> OVER (PARTITION BY thread#) AS LAST from V$ARCHIVED_LOG;
THREAD LAST
---------- ----------
1 100

ALTER DATABASE REGISTER PHYSICAL LOGFILE 'filespec1';

now initiate failover at standby

1. ALTER DATABASE RECOVER MANAGED STANDBY DATABASE FINISH FORCE;

2. ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY;

3. alter database open [if db opened read only since last time it was started]
else shutdown and restart

Tuesday, December 13, 2011

upgrade Oracle 10g to 11g using DBUA


Prerequisites

1) oracle 10g version should be above or equal to 10.2.0.4. if not please upgrade it to 10.2.0.4


SQL> select banner from v$version;


2) timezone should be >=4

SELECT CASE COUNT(DISTINCT(tzname))
WHEN 183 then 1
WHEN 355 then 1
WHEN 347 then 1
WHEN 377 then 2
WHEN 186 then case COUNT(tzname) WHEN 636 then 2 WHEN 626 then 3 ELSE 0 end
WHEN 185 then 3
WHEN 386 then 3
WHEN 387 then case COUNT(tzname) WHEN 1438 then 3 ELSE 0 end
WHEN 391 then case COUNT(tzname) WHEN 1457 then 4 ELSE 0 end
WHEN 392 then case COUNT(tzname) WHEN 1458 then 4 ELSE 0 end
WHEN 188 then case COUNT(tzname) WHEN 637 then 4 ELSE 0 end
WHEN 189 then case COUNT(tzname) WHEN 638 then 4 ELSE 0 end
ELSE 0 end VERSION
FROM v$timezone_names;


3) Stop all batch jobs

4) Ensure no files need media recovery

sqlplus "/ as sysdba"
SQL> SELECT * FROM v$recover_file;
This should return no rows.

5) Ensure no files are in backup mode:
SQL> SELECT * FROM v$backup WHERE status!='NOT ACTIVE';
This should return no rows.

6) Resolve any outstanding unresolved distributed transaction:
SQL> select * from dba_2pc_pending;

7) Ensure the users sys and system have 'system' as their default tablespace.
SQL> SELECT username, default_tablespace FROM dba_users WHERE username in ('SYS','SYSTEM');


8) Ensure that the aud$ is in the system tablespace when auditing is enabled.
SQL> SELECT tablespace_name FROM dba_tables WHERE table_name='AUD$';


09) Empty the recycle bin before starting the upgrade

sqlplus "/ as sysdba"

PURGE DBA_RECYCLEBIN;

select * from recyclebin;

10) Gathers dictionary and schema statistics

sqlplus "/ as sysdba"

EXEC DBMS_STATS.GATHER_DICTIONARY_STATS;

EXEC DBMS_STATS.GATHER_SCHEMA_STATS('SYS');

EXEC DBMS_STATS.GATHER_SCHEMA_STATS('SYSMAN');


EXEC DBMS_STATS.GATHER_SCHEMA_STATS('DPCDSL');

11) check invalid objects and resolve

select OWNER,OBJECT_NAME,OBJECT_TYPE from dba_objects where status='INVALID' ORDER BY OWNER;

-----------COMPILE INVALID OBJECTS

spool c:\temp\invalid.sql ;
select OBJECT_NAME from dba_objects where owner='LDBO' AND STATUS='INVALID'

select
'ALTER ' || OBJECT_TYPE || ' ' ||
OWNER || '.' || OBJECT_NAME || ' COMPILE;'
from
dba_objects
where
status = 'INVALID'
and
object_type in ('PACKAGE','FUNCTION','PROCEDURE','VIEW','TRIGGER')
;
spool out ;
@ c:\temp\invalid.sql ;

-----

12) Perform Cold Backup
sql>shutdown immediate
backup physical file folder to safe location in case of disaster


13) Check for adequate freespace on archive log destination file systems. Note that if your database is in archivelog mode, then it is always desirable and advisable to upgrade the database in noarchivelog mode as that will reduce the time taken to upgrade the database. After the upgrade you can again put the database in the archivelog mode.

sqlplus "/ as sysdba"

SQL> SELECT LOG_MODE FROM SYS.V$DATABASE;

SQL> startup mount;

SQL> alter database noarchivelog;


SQL> alter database open;


14)
Install oracle 11g software without using create database also without upgrade option

15)

Oracle Database 11.1 Pre-Upgrade Information Tool for checking any error

Copy file : utlu112i.sql to C:\temp (any temporary location) from “D:\app\Administrator\product\11.2.0\dbhome_1\RDBMS\ADMIN\utlu112i.sql” .

Connected into Oracle 10g instance while DB is up and running and execute the utlu112i.sql file from SQL prompt.

sqlplus "/ as sysdba"

SQL10g> @C:\temp\utlu112i.sql


SQL10g>select * from registry$sys_inv_objs;


SQL10g>select * from registry$nonsys_inv_objs;


resolve all errors

16)

Final) Now let's perform the database upgrade steps using the Database Upgrade Assistant (DBUA), a GUI to upgrade the DB. It can be found under Start -> All Programs -> Oracle 11g Home -> Configuration and Migration Tools -> Database Upgrade Assistant


17) Version Validation

SQL11g>select comp_name,version,status from dba_registry;

SELECT * FROM dba_registry_log;

SELECT * FROM sys.registry$error;
SQL11g>select banner from v$version;

18) repeat Oracle Database 11.2 Pre-Upgrade Information Tool until all error resolved

SQL11g>sqlplus "/ as sysdba"

SQL11g> @C:\temp\utlu112i.sql


19) Use utluiobj.sql after the upgrade to identify any new invalid

Oracle Database 11.1 Post-Upgrade Invalid Objects Tool

This tool lists post-upgrade invalid objects that were not invalid prior to upgrade (it ignores pre-existing pre-upgrade invalid objects).


SQL11g>@D:\app\Administrator\product\11.2.0\dbhome_1\RDBMS\ADMIN\utluiobj.sql


---------------------
SQL11g> @D:\app\Administrator\product\11.2.0\dbhome_1\RDBMS\ADMIN\utluiobj.sql
.
Oracle Database 11.1 Post-Upgrade Invalid Objects Tool 12-06-2011 11:38:59
.
This tool lists post-upgrade invalid objects that were not invalid
prior to upgrade (it ignores pre-existing pre-upgrade invalid objects).
.
Owner Object Name Object Type
.

PL/SQL procedure successfully completed.

--------------------------

20) result of upgrade

@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlu102s.sql

21) if owr is not installed and if require then instal manually

to install oracle workspace manager

select username from dba_users where username ='WMSYS';

ALTER USER WMSYS IDENTIFIED BY WMSYS ACCOUNT UNLOCK;

@D:\app\Administrator\product\11.2.0\dbhome_1\RDBMS\ADMIN\owminst.plb

select comp_name,version,status from dba_registry;

@D:\app\Administrator\product\11.2.0\dbhome_1\RDBMS\ADMIN\utlu112s.sql


Recover Database with allow corruption



Here is an example of block corruption faced during database recovery:

recover database;

ORA-00600: internal error code, arguments: [3020], [48], [41103361], [41103361], [], [], [], [], [], [], [], []

ORA-10567: Redo is inconsistent with data block (file# 48, block# 41103361)
ORA-10564: tablespace MYTS
ORA-01110: data file 48: '+MYDISC/mydb/datafile/myts.14714.699899641'
ORA-10561: block type 'TRANSACTION MANAGED DATA BLOCK', data object# 323837
Errors in file /mytracepath/mydb_pr06_4728.trc:

You can skip recovery of this corrupted block by running following command:

recover database datafile '+MYDISC/mydb/datafile/myts.14714.699899641' allow 1 corruption;

If there is more than one corruption, you may repeat this step, or run the command with ALLOW n CORRUPTION. However, don't forget to note the corrupted file and block numbers. After opening the database, we will use the following query to find out which segment the corrupted block belongs to:

SELECT tablespace_name, segment_type, owner, segment_name

FROM dba_extents WHERE file_id = 48 and 41103361 between block_id AND block_id + blocks - 1;

generate move rebuild script to migrate partitioned tables from one tablespace to other

declare
    -- Generates (via DBMS_OUTPUT) the ALTER statements needed to move every
    -- table partition out of one tablespace and rebuild the matching local
    -- index partitions into a new index tablespace. Nothing is executed here:
    -- spool the output to a script, review it, then run it.
    oldDataTablespaceName  varchar2(100);  -- source tablespace to migrate from
    newDataTablespaceName  varchar2(100);  -- target tablespace for table partitions
    newIndexTablespaceName varchar2(100);  -- target tablespace for index partitions
    parallel_level         varchar2(2);    -- degree of parallelism for MOVE/REBUILD
begin
    DBMS_OUTPUT.ENABLE(1000000);
    -------------------- SET VARIABLES ----------------------------
    oldDataTablespaceName  := 'OLD_TABLESPACE';  -- fixed: opening quote was missing
    newDataTablespaceName  := 'NEW_TABLESPACE';
    newIndexTablespaceName := 'NEW_INDEX_TABLESPACE';
    parallel_level         := '8';
    ---------------------------------------------------------------
    dbms_output.put_line('alter session enable parallel ddl;');
    for l in ( select table_owner, table_name, partition_name, partition_position
                 from dba_tab_partitions
                where tablespace_name = oldDataTablespaceName
                order by table_owner, table_name, partition_position )
    loop
        dbms_output.put_line('alter table '||l.table_owner||'.'||l.table_name||' move partition "'||l.partition_name||'" tablespace '||newDataTablespaceName||' parallel '||parallel_level||';');
        -- Moving a partition marks its local index partitions UNUSABLE, so
        -- emit a REBUILD for every index partition at the same position.
        for k in ( select dip.index_owner, dip.index_name, dip.partition_name
                     from dba_ind_partitions dip, dba_indexes di
                    where di.table_owner = l.table_owner
                      and di.table_name  = l.table_name
                      and di.owner       = dip.index_owner
                      and di.index_name  = dip.index_name
                      and dip.partition_position = l.partition_position )
        loop
            dbms_output.put_line('alter index '||k.index_owner||'.'||k.index_name||' rebuild partition "'||k.partition_name||'" tablespace '||newIndexTablespaceName||' parallel '||parallel_level||';');
        end loop;
    end loop;
END;
/


Reason: Archivelog Generation Huge

There are many reasons for it and need to check out.
>Heavy DML statements.
>DML with Lobs and xml datatypes.
>Investigate the SQL by awr report.
> check the size of the redolog file it might be small.
> how many time log switch in an hour.
> there is specific time of archive log generations.

Resizing Recreating RedoLogs / Increase Redo log / Archivelog generation fast size


--------------------------------------------final steps--------------------------
select group#, status from v$log;

ALTER SYSTEM CHECKPOINT GLOBAL;

select group#, status from v$log;

alter database drop logfile group 1;

alter database add logfile group 1 ('F:\NBSD1112\REDO01.LOG') size 200M reuse ;

alter system switch logfile;
alter system switch logfile;

select group#, status from v$log;


1)
SELECT a.group#, a.member, b.bytes FROM v$logfile a, v$log b WHERE a.group# = b.group#;


Make the last redo log CURRENT

select group#, status from v$log;

alter system switch logfile;

select group#, status from v$log;


ALTER SYSTEM CHECKPOINT GLOBAL;

ALTER DATABASE DROP LOGFILE GROUP 1;



2) Re-create dropped online redo log group


alter database add logfile group 1 ('F:\NBSD1112\REDO01.LOG' ) size 200m reuse;



3)
select group#, status from v$log;


GROUP# STATUS
---------- ----------------
1 UNUSED
2 INACTIVE
3 CURRENT


Force another log switch

alter system switch logfile;



select group#, status from v$log;

GROUP# STATUS
---------- ----------------
1 CURRENT
2 INACTIVE
3 ACTIVE





4)
Loop back to Step 2 until all logs are rebuilt

alter database add logfile group 2 ('F:\NBSD1112\REDO02.LOG' ) size 200m reuse;




-----------------------------------SECOND METHOD-------------------

SELECT a.group#, a.member, b.bytes FROM v$logfile a, v$log b WHERE a.group# = b.group#;

GROUP# MEMBER BYTES
1 F:\NBSD1112\REDO01.LOG 52428800
2 F:\NBSD1112\REDO02.LOG 52428800
3 F:\NBSD1112\REDO03.LOG 52428800


Here is how i changed this to five 200M redo logs:

SQL> alter database add logfile group 4 ('F:\NBSD1112\REDO04.LOG') size 200M;
SQL> alter database add logfile group 5 ('F:\NBSD1112\REDO05.LOG') size 200M;

while running following sql commands, if you hit an error like this:

ORA-01623: log 3 is current log for instance RPTDB (thread 1) - cannot drop
ORA-00312: online log 3 thread 1: 'F:\NBSD1112\REDO03.LOG'

you should run " alter system switch logfile;" until current log is 4 or 5.

Then execute "alter system checkpoint;"

SQL> alter database drop logfile group 1;
SQL> alter database drop logfile group 2;
SQL> alter database drop logfile group 3;

then move (or maybe drop) old redo logs

RENAME F:\NBSD1112\REDO01.LOG F:\NBSD1112\REDO01_OLD.LOG
RENAME F:\NBSD1112\REDO02.LOG F:\NBSD1112\REDO02_OLD.LOG
RENAME F:\NBSD1112\REDO03.LOG F:\NBSD1112\REDO03_OLD.LOG

finally

SQL> alter database add logfile group 1 ('F:\NBSD1112\REDO01.LOG') size 200M;
SQL> alter database add logfile group 2 ('F:\NBSD1112\REDO02.LOG') size 200M;
SQL> alter database add logfile group 3 ('F:\NBSD1112\REDO03.LOG') size 200M;

Monday, December 12, 2011

Archivelog Frequency

A script to check the frequency of log switches


col MidN format 999
col 1AM format 999
col 2AM format 999
col 3AM format 999
col 4AM format 999
col 5AM format 999
col 6AM format 999
col 7AM format 999
col 8AM format 999
col 9AM format 999
col 10AM format 999
col 11AM format 999
col Noon format 999
col 1PM format 999
col 2PM format 999
col 3PM format 999
col 4PM format 999
col 5PM format 999
col 6PM format 999
col 7PM format 999
col 8PM format 999
col 9PM format 999
col 10PM format 999
col 11PM format 999
-- Pivot v$log_history into one row per day with a column per hour,
-- counting the number of log switches in each hour bucket.
select to_char(first_time, 'mm/dd/yy') logdate,
       sum(case when to_char(first_time, 'hh24') = '00' then 1 else 0 end) "MidN",
       sum(case when to_char(first_time, 'hh24') = '01' then 1 else 0 end) "1AM",
       sum(case when to_char(first_time, 'hh24') = '02' then 1 else 0 end) "2AM",
       sum(case when to_char(first_time, 'hh24') = '03' then 1 else 0 end) "3AM",
       sum(case when to_char(first_time, 'hh24') = '04' then 1 else 0 end) "4AM",
       sum(case when to_char(first_time, 'hh24') = '05' then 1 else 0 end) "5AM",
       sum(case when to_char(first_time, 'hh24') = '06' then 1 else 0 end) "6AM",
       sum(case when to_char(first_time, 'hh24') = '07' then 1 else 0 end) "7AM",
       sum(case when to_char(first_time, 'hh24') = '08' then 1 else 0 end) "8AM",
       sum(case when to_char(first_time, 'hh24') = '09' then 1 else 0 end) "9AM",
       sum(case when to_char(first_time, 'hh24') = '10' then 1 else 0 end) "10AM",
       sum(case when to_char(first_time, 'hh24') = '11' then 1 else 0 end) "11AM",
       sum(case when to_char(first_time, 'hh24') = '12' then 1 else 0 end) "Noon",
       sum(case when to_char(first_time, 'hh24') = '13' then 1 else 0 end) "1PM",
       sum(case when to_char(first_time, 'hh24') = '14' then 1 else 0 end) "2PM",
       sum(case when to_char(first_time, 'hh24') = '15' then 1 else 0 end) "3PM",
       sum(case when to_char(first_time, 'hh24') = '16' then 1 else 0 end) "4PM",
       sum(case when to_char(first_time, 'hh24') = '17' then 1 else 0 end) "5PM",
       sum(case when to_char(first_time, 'hh24') = '18' then 1 else 0 end) "6PM",
       sum(case when to_char(first_time, 'hh24') = '19' then 1 else 0 end) "7PM",
       sum(case when to_char(first_time, 'hh24') = '20' then 1 else 0 end) "8PM",
       sum(case when to_char(first_time, 'hh24') = '21' then 1 else 0 end) "9PM",
       sum(case when to_char(first_time, 'hh24') = '22' then 1 else 0 end) "10PM",
       sum(case when to_char(first_time, 'hh24') = '23' then 1 else 0 end) "11PM"
from v$log_history
group by to_char(first_time, 'mm/dd/yy')
order by 1
/

sqlserver to oracle 11g connection



-----------odbcad32---------------------
datasource name (DSN) : dg4odbc
description: dg4odbc
sqlserver : 172.168.0.6

database: msajag
username: lduser
password: kshitij


test connection successfully

--------------------oracle------


Select * from v$parameter where name like 'global_names%';

alter system set global_names=false scope = both;

create public database link lnk_rkserver connect to lduser identified by kshitij using 'dg4odbc';

if ora 28500 error then username, password in double quotes

create public database link lnk_rkserver connect to "lduser" identified by "kshitij" using 'dg4odbc';


---D:\app\Administrator\product\11.2.0\dbhome_1\hs\admin\initdg4odbc.ora------

HS_FDS_CONNECT_INFO = dg4odbc
HS_FDS_TRACE_LEVEL = off


-----------listener-----------

(SID_DESC=
(SID_NAME=dg4odbc)
(ORACLE_HOME=D:\app\Administrator\product\11.2.0\dbhome_1)
(PROGRAM=dg4odbc)
)


-----------------tnsnames.ora-------------

dg4odbc =
(DESCRIPTION=
(ADDRESS=(PROTOCOL=tcp)(HOST=172.168.0.11)(PORT=1521))
(CONNECT_DATA=(SID=dg4odbc))
(HS=OK)
)



Thursday, December 8, 2011

EXPLAIN PLAN


SQL> explain plan for
2 SELECT * FROM TBLBANKDETAIL;

Explained.

SQL> set linesize 160
SQL> set pages 300
SQL> select * from table(dbms_xplan.display());

PLAN_TABLE_OUTPUT
-------------------------------------------------------------------------------------------------------------------
Plan hash value: 3594628191

-----------------------------------------------------------------------------------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
-----------------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | 100K| 10M| 253 (1)| 00:00:04 |
| 1 | TABLE ACCESS FULL| TBLBANKDETAIL | 100K| 10M| 253 (1)| 00:00:04 |
-----------------------------------------------------------------------------------

8 rows selected.

SQL>

Ora-29257 UTL_INADDR.get_host_address


Ora-29257 When Trying to Use UTL_INADDR to Get IP Address or Host Name of a Server
Problem Description
When using either the get_host_address() or get_host_name() function of the UTL_INADDR package to get host information, an ORA-29257 error is returned like below:

ORA-29257: host notebook unknown
ORA-06512: at "SYS.UTL_INADDR", line 19
ORA-06512: at "SYS.UTL_INADDR", line 40
For example while installation Audit Vault Agent it fails with following error:

Error1: SQL Error: ORA-29257: host notebook unknown
ORA-06512: at "SYS.UTL_INADDR", line 19
ORA-06512: at "SYS.UTL_INADDR", line 40
ORA-06512: at "AVSYS.DBMS_AUDIT_VAULT_AGENT_INSTALL" line 55
ORA-06512: at line 1
Cause of the Problem

The functions in the UTL_INADDR package are essentially using the 'nslookup' command behind the scenes and the nslookup command uses the specified dns server to get the information. If you test the nslookup command and it fails then there is a configuration issue on the host machine and it has nothing to do with the the Oracle Database server. On unix machines, the /etc/resolv.conf identifies the DNS server to be used.
Solution of the Problem
Modify the /etc/resolv.conf to configure the DNS server resolution and once nslookup works, so will the UTL_INADDR package. Note that you must have DNS server in this case and the corresponding hostname must resolve DNS.

If you don't have DNS server you can use /etc/hosts file and add the entry of the host and IP address which is unknown.

Index DeFragmentation / Rebuild / Index Performance Tuning



Index Defragmentation Steps

Analyzing statistics
Validating the index
Checking PCT_USED
Dropping and rebuilding (or coalescing) the index

-----The following INDEX_STATS columns are especially useful:

height----- refers to the maximum number of levels encountered within the index.

lf_rows------- refers to the total number of leafs nodes in the index.

del_lf_rows --------refers to the number of leaf rows that have been marked deleted as a result of table DELETEs.

SQL>analyze index INDEX_NAME validate structure;


Then query INDEX_STATS view
1. If del_lf_rows/lf_rows is > .2 then index should be rebuild.
2. If height is 4 then index should be rebuild.
3. If lf_rows is lower than lf_blks then index should be rebuild.




----------validate index------

spool c:\index_validate.sql

select 'Analyze Index '||index_name||' validate structure;' from user_indexes;
spool off

@c:\index_validate.sql


-------------following query should be run after all analyze statement

Select Name,(DEL_LF_ROWS_LEN/LF_ROWS_LEN)*100 as index_usage From index_stats Where LF_ROWS_LEN!=0 order by Name ;


select DEL_LF_ROWS*100/decode(LF_ROWS, 0, 1, LF_ROWS) PCT_DELETED,(LF_ROWS-DISTINCT_KEYS)*100/ decode(LF_ROWS,0,1,LF_ROWS) DISTINCTIVENESS
from index_stats;

If the PCT_DELETED is 20% or higher, the index is candidate for rebuilding



-------following scripts validate the indexes and find to rebuild and rebuild them-------

set serveroutput on size 100000

DECLARE
    /* Validates the structure of every non-SYS-owned index and prints an
       ALTER INDEX ... REBUILD statement for each index whose B-tree height
       or percentage of deleted leaf rows exceeds the configured thresholds.
       Requires SERVEROUTPUT ON. Statements are only printed, not executed. */
    vHeight    index_stats.height%TYPE;       /* Height of index tree */
    vLfRows    index_stats.lf_rows%TYPE;      /* Index leaf rows */
    vDLfRows   index_stats.del_lf_rows%TYPE;  /* Deleted leaf rows */
    vDLfPerc   NUMBER;                        /* Deleted-leaf percentage */
    vMaxHeight NUMBER;                        /* Rebuild when height exceeds this */
    vMaxDel    NUMBER;                        /* Rebuild when pct deleted exceeds this */
    CURSOR cGetIdx IS
        SELECT owner, index_name
          FROM dba_indexes
         WHERE owner NOT LIKE 'SYS%';
BEGIN
    /* Define maximums. This section can be customized. */
    vMaxHeight := 3;   /* i.e. flag indexes with height >= 4 */
    vMaxDel    := 20;  /* percent of leaf rows marked deleted */

    /* For every index, validate structure */
    FOR idx IN cGetIdx LOOP
        /* Native dynamic SQL replaces the deprecated DBMS_SQL.V7
           open/parse/execute/close sequence; quoting the identifiers
           also keeps mixed-case index names working. */
        EXECUTE IMMEDIATE
            'ANALYZE INDEX "' || idx.owner || '"."' || idx.index_name ||
            '" VALIDATE STRUCTURE';

        /* INDEX_STATS holds exactly one row, overwritten by each ANALYZE,
           so it must be read immediately after the validate. */
        SELECT height, lf_rows, del_lf_rows
          INTO vHeight, vLfRows, vDLfRows
          FROM index_stats;

        /* Does index need rebuilding? If so, then generate command. */
        IF vDLfRows = 0 THEN  /* handle case where div by zero */
            vDLfPerc := 0;
        ELSE
            vDLfPerc := (vDLfRows / vLfRows) * 100;
        END IF;

        IF (vHeight > vMaxHeight) OR (vDLfPerc > vMaxDel) THEN
            DBMS_OUTPUT.PUT_LINE('ALTER INDEX ' || idx.owner || '.' ||
                                 idx.index_name || ' REBUILD;');
        END IF;
    END LOOP;
END;
/

Wednesday, December 7, 2011

win error error 0x80070522


This issue might occur permissions are not set properly for the C Drive.

Make sure that you are logged in as Administrator.

To take ownership of C Drive, follow these steps:

1. Right-click the C Drive and then click Properties.
2. Click the Security tab.
3. Click Advanced, and then click the Owner tab.
4. In the Name list, click your user name, or click Administrator if you are logged in as Administrator, or click the Administrators group.
5. Click on Edit and then put a check mark against "Replace all existing inheritable permissions on all descendants with inheritable permissions from this object".
6. Click OK, and then click Yes when you receive the following message:
This will replace explicitly defined permissions on all descendants of this object with inheritable permissions from C-Drive (C:).
Do you wish to continue?
7. Once the permissions are replaced, click on Ok.

Restart computer

8. Check if you are able to copy, paste or delete any documents now.
If the above steps fail then you may also want to disable UAC and check if that helps.
1. Click Start, type msconfig in the start search box, and then press Enter.
If you are prompted for an administrator password or for a confirmation, type the password, or click Continue.
2. Click the Tools tab.
3. Click Disable UAC and then click Launch.
4. A command window appears, and shortly thereafter, a notification bubble appears informing you that UAC is disabled.
5. Click Ok and restart your computer to apply the change.

Monday, December 5, 2011

tns 12535 operation timed out

ping serverip
tnsping serverip
telnet serverip 1521


solution
allow port 1521 on server

Saturday, December 3, 2011

ORA-24005 must use DBMS_AQADM.DROP_QUEUE_TABLE to drop queue tables

I was getting this error when I was trying to drop a schema .
Sql> drop user test cascade;
ERROR at line 1:
ORA-00604: error occurred at recursive SQL level 1
ORA-24005: must use DBMS_AQADM.DROP_QUEUE_TABLE to drop queue tables
Logged in as Test user and checked for queue tables .
SQL> select * from user_queue_tables;
no rows selected

SQL> select OWNER, NAME, QUEUE_TABLE, ENQUEUE_ENABLED, DEQUEUE_ENABLED
from DBA_QUEUES where OWNER='TEST';
no rows selected
However I was able to locate few queue tables in the schema when I used
Sql> select table_name from user_tables;
Got few Tables starting with AQ$_ *******
Tried to delete these tables using the DBMS_AQADM.DROP_QUEUE_TABLE procedure . However ended with the following error message .
SQL> begin
2 DBMS_AQADM.DROP_QUEUE_TABLE('AQ$_test');
3 end;
4
5 /
begin
*
ERROR at line 1:
ORA-24002: QUEUE_TABLE SCOTT.AQ$_test does not exist
ORA-06512: at "SYS.DBMS_AQADM_SYS", line 4084
ORA-06512: at "SYS.DBMS_AQADM", line 197
ORA-06512: at line 2

Working solution :


Logged in as sys and issued the following command :

alter session set events '10851 trace name context forever, level 2';

and then dropped all the AQ$.***** tables from sys successfully.
Finally dropped the particular schema .



Buffer Cache Size

High CPU_COUNT and increased granule size can cause an ORA-04031 error.

------CPU_COUNT specifies the number of CPUs available to Oracle. On single-CPU computers, the value of CPU_COUNT is 1.

Memory sizing depends on CPU_COUNT (No of processor groups).
Please use below formulas to calculate min buffer cache size

--Minimum Buffer Cache Size
10g : max(CPU_COUNT) * max(Granule size)
11g : max(4MB * CPU_COUNT)

Please note that If SGA_MAX_SIZE < 1GB then use Granule size = 4mb, SGA_MAX_SIZE > 1G then use Granule size = 8MB.

-- _PARALLEL_MIN_MESSAGE_POOL Size
If PARALLEL_AUTOMATIC_TUNING =TRUE then large pool is used for this area otherwise shared pool is used.

CPU_COUNT*PARALLEL_MAX_SERVERS*1.5*(OS msg buffer size) OR CPU_COUNT*5*1.5*(OS message size)

-- Add extra 2MB per CPU_COUNT for shared pool.

Here is the example:-

Sun Solaris server has threaded CPUs. 2 physical CPUs has 8 cores, and each core has 8 threads, then Oracle evaluates CPU_COUNT = 2*8*8=128.

When SGA_MAX_SIZE=900MB,
Minimum Buffer Cache = CPU_COUNT *Granule size = 128*4M = 512MB
Shared Pool can use 338MB

When SGA_MAX_SIZE=1200MB,
Minimum Buffer Cache = CPU_COUNT *Granule size = 128*8M = 1024MB
Shared Pool can use 176 MB, so ORA-4031 occurs despite larger SGA_MAX_SIZE.

You need to manually tune CPU_COUNT parameter to resolve this error.

Friday, December 2, 2011

ORA-02049: timeout: distributed transaction waiting for lock

ORA-02049: timeout: distributed transaction waiting for lock


Caused by simultaneous transactions against the remote database.

Make sure that there were no any activities on the remote database

1) select * from v$parameter where upper(name)='DISTRIBUTED_LOCK_TIMEOUT'; ----60 to 300sec


alter system set distributed_lock_timeout=300 scope=spfile;
shut immediate
startup


2) The connection remains open until you end your local session or until the number of database links for your session exceeds the value of the initialization parameter OPEN_LINKS. If you want to reduce the network overhead associated with keeping the link open, then use this clause to close the link explicitly if you do not plan to use it again in your session.

3) increase SHARED_POOL_SIZE

or free space

ALTER SYSTEM FLUSH SHARED_POOL;

Oracle Internal Code Error


-----------------------------

ERROR:
ORA-06553: PLS-801: internal error [56319]


ERROR:
ORA-06553: PLS-801: internal error [56319]


Error accessing package DBMS_APPLICATION_INFO

--------------

-- Recovery sequence for ORA-06553 / PLS-801 internal error [56319]
-- (presumably a PL/SQL wordsize mismatch, e.g. after moving a database
-- between 32-bit and 64-bit Oracle homes — confirm against My Oracle
-- Support before running). Run as SYSDBA in SQL*Plus.
shutdown immediate;
-- Open in restricted migrate/upgrade mode so the maintenance scripts can run.
startup migrate;
-- utlirp.sql invalidates all PL/SQL objects so they can be recompiled
-- in the current environment.
@$ORACLE_HOME/rdbms/admin/utlirp.sql
-- Restart normally before recompiling.
shutdown immediate;
startup
-- utlrp.sql recompiles everything utlirp.sql invalidated.
@$ORACLE_HOME/rdbms/admin/utlrp.sql
-- NOTE(review): 'password' is a placeholder — substitute the real SYSTEM password.
connect system/password



execute dbms_stats.delete_database_stats



execute dbms_registry_sys.validate_components


select owner,object_type,count(*) from dba_objects where status='INVALID' group by owner,object_type;








---------------------



SQL> create table ldtest as select * from ldfibs;
create table ldtest as select * from ldfibs
*
ERROR at line 1:
ORA-00600: internal error code, arguments: [15735], [2160], [2152], [], [], [], [],


SQL>
SQL> alter system set parallel_execution_message_size = 4096 scope=spfile;

-----------------------


oracle operation table size details could not be determined


--------------------


ORA-25153: Temporary Tablespace is Empty



-- 1) Create an interim temporary tablespace to switch to while the
--    empty/broken one is rebuilt (ORA-25153 fix).
CREATE TEMPORARY TABLESPACE temp2
TEMPFILE 'E:\SNSD1011\TEMP02.ORA' SIZE 5M REUSE
AUTOEXTEND ON NEXT 1M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 1M;

-- 2) Make the interim tablespace the database default so the old one
--    is no longer in use and can be dropped.
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temp2;

-- 3) Drop the broken temporary tablespace together with its tempfiles.
DROP TABLESPACE temporary INCLUDING CONTENTS AND DATAFILES;

-- 4) Recreate the temporary tablespace with a properly sized tempfile.
--    (Fixed case typo: was "cREATE".)
CREATE TEMPORARY TABLESPACE temporary
TEMPFILE 'E:\SNSD1011\TEMP01.ORA' SIZE 500M REUSE
AUTOEXTEND ON NEXT 100M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 1M;

-- 5) Switch the database default back to the rebuilt tablespace.
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temporary;

-- 6) Remove the interim tablespace.
DROP TABLESPACE temp2 INCLUDING CONTENTS AND DATAFILES;

-- 7) Verify the rebuilt tablespace has a tempfile. Unquoted identifiers
--    are stored upper-case in the data dictionary, so compare against
--    'TEMPORARY' — the original lowercase literal matched zero rows.
SELECT tablespace_name, file_name, bytes
FROM dba_temp_files
WHERE tablespace_name = 'TEMPORARY';



--------------

SQL> EXEC dbms_stats.gather_schema_stats('LDBO',cascade=>TRUE);
BEGIN dbms_stats.gather_schema_stats('LDBO',cascade=>TRUE); END;

*
ERROR at line 1:
ORA-00600: internal error code, arguments: [qernsRowP], [1], [], [], [], [],
[], []



-----------------


Connectivity error: [Microsoft][ODBC driver for Oracle][Oracle]
ORA-01578: ORACLE data block corrupted (file # 1, block # 16382) ORA-01110: data file 1: 'D:\LMSD0304\SYSTEM01.ORA'


select * from V$DATABASE_BLOCK_CORRUPTION;


-- Identify the segment that owns the corrupted block reported by
-- ORA-01578 (file # 1, block # 16382).
SELECT segment_type,
       owner || '.' || segment_name
FROM dba_extents
WHERE file_id = 1
  AND 16382 BETWEEN block_id AND block_id + blocks - 1;




analyze table SYS.IDL_UB1$ validate structure;





-- Create the DBMS_REPAIR administration (repair) table that
-- DBMS_REPAIR.CHECK_OBJECT later populates with corrupt-block info.
BEGIN
DBMS_REPAIR.ADMIN_TABLES
(
-- TABLE_NAME is the name of the repair table to CREATE — not the name
-- of the corrupted table being checked. For TABLE_TYPE => REPAIR_TABLE
-- the name must start with the 'REPAIR_' prefix; the original value
-- 'IDL_UB1$' fails with ORA-24129.
TABLE_NAME => 'REPAIR_IDL_UB1',
TABLE_TYPE => DBMS_REPAIR.REPAIR_TABLE,
ACTION => DBMS_REPAIR.CREATE_ACTION,
TABLESPACE => 'SYSTEM'
);
END;
/


Followers