Wednesday, February 8, 2012

Analyze Scheduling using oracle




---------------------------------------------------------------------frequency 1 day-----------------------------------------

-- Gather LDBO schema statistics once a day at 23:00 Asia/Calcutta.
-- FIX: the original used job_name => 'analyze'; ANALYZE is an Oracle
-- reserved word, so job creation fails with ORA-27452. Use a non-reserved
-- job name instead.
-- FIX: start/end dates were bare strings, whose parsing depends on the
-- session's NLS_TIMESTAMP_TZ_FORMAT; use TO_TIMESTAMP_TZ with an explicit
-- format mask so the script runs in any session.
BEGIN
  DBMS_SCHEDULER.create_job (
    job_name        => 'GATHER_LDBO_STATS_DAILY',
    job_type        => 'PLSQL_BLOCK',
    job_action      => 'BEGIN DBMS_STATS.gather_schema_stats(''LDBO'',CASCADE=>TRUE); END;',
    start_date      => TO_TIMESTAMP_TZ ('01-APR-12 11.00.00 PM ASIA/CALCUTTA',
                                        'DD-MON-RR HH.MI.SS AM TZR'),
    repeat_interval => 'FREQ=DAILY',
    end_date        => TO_TIMESTAMP_TZ ('02-APR-13 11.00.00 PM ASIA/CALCUTTA',
                                        'DD-MON-RR HH.MI.SS AM TZR'),
    enabled         => TRUE,
    comments        => 'JOB to gather LDBO statistics');
END;
/


----------------- frequency 2 hours---------------------------------------

-- Schedule a statistics-gathering job for the LDBO schema that fires
-- every two hours, starting 16-Feb-2012 18:00 Asia/Calcutta and ending
-- 02-Apr-2013 23:00 Asia/Calcutta.
BEGIN
  DBMS_SCHEDULER.create_job (
    job_name        => 'analyze1',
    comments        => 'JOB to gather LDBO statistics every 2 hours',
    job_type        => 'PLSQL_BLOCK',
    -- the scheduled work: cascaded schema-stats collection for LDBO
    job_action      => 'BEGIN DBMS_STATS.gather_schema_stats(''LDBO'',CASCADE=>TRUE); END;',
    -- calendaring expression: hourly frequency with a 2-hour interval
    repeat_interval => 'FREQ=HOURLY;INTERVAL=2',
    start_date      => '16-FEB-12 06.00.00 PM ASIA/CALCUTTA',
    end_date        => '02-APR-13 11.00.00 PM ASIA/CALCUTTA',
    enabled         => TRUE);
END;
/

------------------------------------------frequency syntax

FREQ=[YEARLY | MONTHLY | WEEKLY | DAILY | HOURLY | MINUTELY | SECONDLY] ;


-------------------To run a job every Tuesday at 11:25

FREQ=DAILY; BYDAY=TUE; BYHOUR=11; BYMINUTE=25;

FREQ=WEEKLY; BYDAY=TUE; BYHOUR=11; BYMINUTE=25;

FREQ=YEARLY; BYDAY=TUE; BYHOUR=11; BYMINUTE=25;



------------------ To run a job Tuesday and Thursday at 11, 14 and 22 o'clock

FREQ=WEEKLY; BYDAY=TUE,THU; BYHOUR=11,14,22;

EXPDP Data Pump Job Scheduling with rename dump and remove old files

1) create directory export_auto as 'd:\expdp1213';

create user dba_export_user identified by test123;

grant connect, create database link, resource, create view to dba_export_user;
grant unlimited tablespace to dba_export_user;
grant exp_full_database to dba_export_user;
grant read,write on directory export_auto to dba_export_user;
grant execute on dbms_flashback to dba_export_user;
grant create table to dba_export_user;
grant FLASHBACK ANY TABLE to dba_export_user;


2)

CREATE OR REPLACE PROCEDURE dba_export_user.start_export
IS
   -- Runs a FULL-database Data Pump export into directory EXPORT_AUTO,
   -- renames the resulting dump/log files with today's DDMMYYYY suffix,
   -- and removes the files produced two days earlier.
   hdl_job       NUMBER;          -- Data Pump job handle
   l_job_state   VARCHAR2 (20);   -- final state reported by wait_for_job
BEGIN
   -- Drop the master table left behind by a previous (possibly aborted)
   -- run; otherwise DBMS_DATAPUMP.OPEN fails because job AUTO_EXPORT
   -- already exists. Best effort: the table usually does not exist.
   BEGIN
      EXECUTE IMMEDIATE 'drop table dba_export_user.AUTO_EXPORT';
   EXCEPTION
      WHEN OTHERS THEN NULL;
   END;

   hdl_job := DBMS_DATAPUMP.OPEN (operation => 'EXPORT', job_mode => 'FULL', job_name => 'AUTO_EXPORT');
   DBMS_DATAPUMP.add_file (handle => hdl_job, filename => 'exp1213.dmp', directory => 'EXPORT_AUTO', filetype => DBMS_DATAPUMP.ku$_file_type_dump_file);
   DBMS_DATAPUMP.add_file (handle => hdl_job, filename => 'export.log', directory => 'EXPORT_AUTO', filetype => DBMS_DATAPUMP.ku$_file_type_log_file);
   DBMS_DATAPUMP.start_job (handle => hdl_job);
   DBMS_DATAPUMP.wait_for_job (handle => hdl_job, job_state => l_job_state);
   DBMS_OUTPUT.put_line ('Job exited with status:' || l_job_state);

   DBMS_DATAPUMP.detach (handle => hdl_job);

   ----------------------RENAME BACKUP WITH DATE
   -- Stamp the fixed-name output files with today's date so the next run's
   -- add_file calls do not collide with them. A rename failure must not
   -- make the (already successful) export appear failed.
   BEGIN
      UTL_FILE.FRENAME ('EXPORT_AUTO', 'exp1213.DMP', 'EXPORT_AUTO', 'exp1213' || '_' || TO_CHAR (SYSDATE, 'DDMMYYYY') || '.DMP');
   EXCEPTION
      WHEN OTHERS THEN NULL;
   END;

   BEGIN
      UTL_FILE.FRENAME ('EXPORT_AUTO', 'export.log', 'EXPORT_AUTO', 'export' || '_' || TO_CHAR (SYSDATE, 'DDMMYYYY') || '.LOG');
   EXCEPTION
      WHEN OTHERS THEN NULL;
   END;

   ------------DELETE TWO DAYS BEFORE BACKUP
   -- On the first two runs these files do not exist yet and UTL_FILE.FREMOVE
   -- raises an exception (invalid file operation), which previously aborted
   -- the whole procedure; swallow it so cleanup stays best-effort.
   BEGIN
      UTL_FILE.FREMOVE ('EXPORT_AUTO', 'exp1213' || '_' || TO_CHAR (SYSDATE - 2, 'DDMMYYYY') || '.DMP');
   EXCEPTION
      WHEN OTHERS THEN NULL;
   END;

   BEGIN
      UTL_FILE.FREMOVE ('EXPORT_AUTO', 'export' || '_' || TO_CHAR (SYSDATE - 2, 'DDMMYYYY') || '.log');
   EXCEPTION
      WHEN OTHERS THEN NULL;
   END;
END;
/


3) Change the time, Date

-- Create the scheduler job that runs the export procedure daily.
-- STORED_PROCEDURE job type: job_action names the procedure to execute.
begin
  dbms_scheduler.create_job (
      job_name        => 'EXPORT_JOB',
      job_type        => 'STORED_PROCEDURE',
      job_action      => 'dba_export_user.start_export',
      -- first run; adjust date/time before deploying
      start_date      => '08-FEB-12 06.02.00.00 PM ASIA/CALCUTTA',
      -- daily, explicitly listing all seven days
      repeat_interval => 'FREQ=DAILY; BYDAY=MON,TUE,WED,THU,FRI,SAT,SUN;',
      enabled         => TRUE,
      comments        => 'EXPORT_DATABASE_JOB');
end;
/


Note: Rename the dmp file with sysdate on daily basis before next schedule time

manually execute backup job
EXEC dba_export_user.start_export;

check running job status
select * from DBA_datapump_jobs;

drop job (pass the JOB name, not the procedure name)
EXEC dbms_scheduler.drop_job('EXPORT_JOB');

Monday, February 6, 2012

ORACLE AUDIT FOR ALTER COMMAND



CREATE TABLE DBA_AUDIT_TAB_KSH (USERNAME VARCHAR2(10), SQL_TEXT VARCHAR2(2000),TIMESTAMP DATE);

CREATE OR REPLACE TRIGGER DBA_AUDIT_KSH
BEFORE ALTER ON SCHEMA
-- Records every ALTER statement against a TABLE in this schema into
-- DBA_AUDIT_TAB_KSH, together with the issuing user and a timestamp.
DECLARE
sql_text ora_name_list_t;
stmt VARCHAR2(2000);
n integer;
BEGIN
-- (removed a dead "null;" statement that preceded this IF)
IF (ora_dict_obj_type IN ( 'TABLE') )
then
-- ora_sql_txt splits the triggering statement into chunks; reassemble
-- them, clamping to 2000 chars so a very long statement cannot raise
-- VALUE_ERROR on the VARCHAR2(2000) buffer.
n:= ora_sql_txt(sql_text);
FOR i IN 1..n LOOP
stmt := SUBSTR(stmt || sql_text(i), 1, 2000);
END LOOP;
-- FIX: the original did dt := TO_DATE(SYSDATE,'DD-MM-YYYY HH24:MI:SS'),
-- which implicitly converts the DATE to a string via NLS_DATE_FORMAT and
-- back, silently dropping the time component (or erroring) depending on
-- session settings. SYSDATE is already a DATE; use it directly.
INSERT INTO DBA_AUDIT_TAB_KSH (username,sql_text,timestamp) VALUES (user,stmt,SYSDATE);

END IF;
END DBA_AUDIT_KSH;
/


Saturday, February 4, 2012

Performance Tuning Basic Guidelines

** Redo Log files – ensure that redo log are allocated on the fast disk, with minimum activities.
** Temporary tablespaces – ensure that temporary tablespaces are allocated on the fast disk, with minimum activities.
** Fragmentation of tablespaces – defragmentize tablespaces, equal blocksize for INITIAL and NEXT extents.
** Shared Pool Sizing – 1/3 or more of total physical memory, and check for thrashing/paging/swapping activity.
** DB_BLOCK_BUFFER – to enable buffering of data from datafiles during query and updates/inserts operation.
** Use BIND variables – to minimize parsing of SQL and enable SQL area reuse, and standardize bind-variable naming conventions.
** Identical SQL statements – literally identical – to enable SQL area reuse.
** Initial/Next Extents sizing – ensure initial and next are the same. Should be as small as possible to avoid wastage of spaces, but at the same time large enough to minimize time spent in frequent

allocation.
** PCTINCREASE – zero to ensure minimum fragmentization.
** Small PCTUSED and large PCTFREE – to ensure sufficient spaces for INSERT intensive operation.
** Freelist groups – large values to ensure parallelization of INSERT-intensive operation.
** INITRANS and MAXTRANS – large values to enable large number of concurrent transactions to access tables.
** Readonly tablespaces – to minimize latches/enqueues resources, as well as PINGING in OPS.
** Create indexes for frequently accessed columns – especially for range scanning and equality conditions in “where” clause.
** Use hash indexes if equality conditions is used, and no range scanning involved.
** If joining of tables is used frequently, consider Composite Indexes.
** Use Clustered tables – columns allocated together.
** Create Index-Organized Tables when data is mostly readonly – to localize both the data and indexes together.
** Use PARALLEL hints to make sure Oracle parallel query is used.
** IO slaves – to enable multiple DB writers to write to disks.
** Minextents and Maxextents sizing – ensure as large as possible to enable preallocation.
** Avoid RAID5 – IO intensive (redo log, archivelog, temporary tablespace, RBS etc)
** MTS mode – to optimize OLTP transaction, but not BATCH environment.
** Partition Elimination – to enable unused tablespaces partition to be archived.
** Performance hit seriously when bitmap indexes used in table with heavy DML. Might have to drop and recreate the bitmap indexes.
** Increase LOG_SIMULTANEOUS_COPIES – minimize contention for redo copy latches.
** In SQLLoader - using direct path over conventional path loading.
** Using parallel INSERT... SELECT when inserting data that already exists in another table in the database – faster than parallel direct loader using SQLLoader.
** Create table/index using UNRECOVERABLE option to minimize REDO LOG updating. SQLloading can use unrecoverable features, or ARCHIVELOG disabled.
** Alter index REBUILD parallel 2 – to enable 2 parallel processes to index concurrently.
** Use large redo log files to minimize log switching frequency.
** Loading is faster when using SQLLOADING if data source is presorted in a file.
** Drop the indexes, and disable all the constraints, when using SQLloader. Recreate the indexes after SQLloader has completed.
** Use Star Query for Data Warehousing-like application: /*+ ORDERED USE_NL(facts) INDEX(facts fact_concat) */ or /*+ STAR */.
** Using Parallel DDL statements in:
** CREATE INDEX
** CREATE TABLE ... AS SELECT
** ALTER INDEX ... REBUILD
** The parallel DDL statements for partitioned tables and indexes are:
** CREATE TABLE ... AS SELECT
** CREATE INDEX
** ALTER TABLE ... MOVE PARTITION
** ALTER TABLE ... SPLIT PARTITION
** ALTER INDEX ... REBUILD PARTITION
** ALTER INDEX ... SPLIT PARTITION
** Parallel analyze on partitioned table - ANALYZE {TABLE,INDEX} PARTITION.
** Using Asynchronous Replication instead of Synchrnous replication.
** Create snapshot log to enable fast-refreshing.
** In Replication, use parallel propagation to setup multiple data streams.
** Using ALTER SESSION ….HASHED_JOINED_ENABLED.
** Using ALTER SESSION …. ENABLE PARALLEL DML.
** Use ANALYZE TABLE….ESTIMATE STATISTICS for large tables, and COMPUTE STATISTICS for small table, to create statistics to enable Cost-Based Optimizer to made more accurate decision on

optimization technique for the query.
** To reduce contention on the rollback segments, at most 2 parallel process transactions should reside in the same rollback segment.
** To speed up transaction recovery, the initialization parameter CLEANUP_ROLLBACK_ENTRIES should be set to a high value approximately equal to the number of rollback entries generated for the forward-

going operation.
** Using raw devices/partition instead of file system partition.
** Consider increasing the various sort related parameters:
** sort_area_size
** sort_area_retained_size
** sort_direct_writes
** sort_write_buffers
** sort_write_buffer_size
** sort_spacemap_size
** sort_read_fac
** Tune the database buffer cache parameter BUFFER_POOL_KEEP and BUFFER_POOL_RECYCLE to keep the buffer cache after use, or age out the data blocks to recycle the buffer cache for other data.
** Larger values of LOG_BUFFER reduce log file I/O, particularly if transac-tions are long or numerous. The default setting is four times the maximum data block size for the host operating system.
** DB_BLOCK_SIZE should be multiple of OS block size.
** SHARED_POOL_SIZE –The size in bytes of the area devoted to shared SQL and PL/SQL statements.
** The LOCK_SGA and LOCK_SGA_AREAS parameters lock the entire SGA or particular SGA areas into physical memory.
** You can force Oracle to load the entire SGA into main memory by set ting the PRE_PAGE_SGA=TRUE in the init.ora file. This load slows your startup process slightly, but eliminates cache misses on the

library and data dictionary during normal runs.
** Enable DB_BLOCK_CHECKSUM if automatic checksum on datablocks is needed, performance will be degraded slightly.
** Use EXPLAIN PLAN to understand how Oracle process the query – utlxplan.sql.
** Choose between FIRST_ROWS or ALL_ROWS hint in an individual SQL state-ment to determine the best response time required for returning data.
** Use bitmap indexes for low cardinality data.
** Use full-table scan when the data selected ranged over a large percentage of the tables.
** Use DB_FILE_MULTIBLOCK_READ_COUNT – to enable full table scans by a single multiblock read. Increase this value if full table scan is desired.
** Check if row migration or row chaining has occurred - running utlchain.sql.
** Choose between offline backup or online backup plan.

Monday, January 2, 2012

ORA-01114: IO error writing block to file 202 (block # 1473756)

Linux-x86_64 Error: 25: Inappropriate ioctl for device


ERROR at line 29:
ORA-01114: IO error writing block to file 202 (block # 1473756)
ORA-27072: File I/O error
Linux-x86_64 Error: 25: Inappropriate ioctl for device
Additional information: 4
Additional information: 1473756
Additional information: 90112
ORA-01114: IO error writing block to file 202 (block # 1473756)
ORA-27072: File I/O error
Linux-x86_64 Error: 25: Inappropriate ioctl for device
Additional information: 4
Additional information: 1473756
Additional information: 90112


---------------
select tablespace_name,file_name,bytes/1024/1024,status,autoextensible,increment_by,user_bytes/1024/1024 from dba_temp_files;

----------------------------------------------Existing Temp Tablespace------------------------------------
/ldccm_data1/ora11g/ldccmd/temp/ldccm_temp01.dbf
1024 ONLINE NO 0 1023

/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp02.dbf
23028 ONLINE YES 1 23027

/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp03.dbf
23026 ONLINE YES 1 23025


FILE_NAME
----------------------------------------------------------------
BYTES/1024/1024 STATUS AUT INCREMENT_BY USER_BYTES/1024/1024
--------------- ------- --- ------------ --------------------
/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp04.dbf
23026 ONLINE YES 1 23025

/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp05.dbf
23026 ONLINE YES 1 23025

/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp06.dbf
23026 ONLINE YES 1 23025



---------------------------------Solution-------------------

/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp01.dbf

1)
CREATE TEMPORARY TABLESPACE temp2
TEMPFILE '/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp10.dbf' SIZE 1024M REUSE
AUTOEXTEND ON NEXT 500M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 500M;

2)
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temp2;

3)
DROP TABLESPACE temporary INCLUDING CONTENTS AND DATAFILES;


---if dropping hang then restart oracle services or do following

SELECT b.tablespace,b.segfile#,b.segblk#,b.blocks,a.sid,a.serial#,
a.username,a.osuser, a.status
FROM v$session a,v$sort_usage b
WHERE a.saddr = b.session_addr;

alter system kill session 'SID_NUMBER, SERIAL#NUMBER'; kill those session that are not being used actually.

Now dropping the previous tablespace
DROP TABLESPACE


4)

CREATE TEMPORARY TABLESPACE temporary
TEMPFILE '/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp01.dbf' SIZE 20480M REUSE
AUTOEXTEND ON NEXT 1024M;

5)
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temporary;

6)
DROP TABLESPACE temp2 INCLUDING CONTENTS AND DATAFILES;
7)

SELECT tablespace_name, file_name, bytes
FROM dba_temp_files WHERE tablespace_name = 'TEMPORARY';  -- dictionary stores names in uppercase


-------------------------------------------

Friday, December 30, 2011

Oracle 11g new features

Automatic Memory Tuning - Automatic PGA tuning was introduced in Oracle 9i. Automatic SGA tuning was introduced in Oracle 10g. In 11g, all memory can be tuned automatically by setting one parameter.

SQL Performance Analyzer (Fully Automatic SQL Tuning) - Using SPA, you can tell 11g to automatically apply SQL profiles for statements where the suggested profile give 3-times better performance that the existing statement. The performance comparisons are done by a new administrative task during a user-specified maintenance window.

Automated Storage Load balancing - Oracle’s Automatic Storage Management (ASM) now enables a single storage pool to be shared by multiple databases for optimal load balancing. Shared disk storage resources can alternatively be assigned to individual databases and easily moved from one database to another as processing requirements change.

Enhanced ILM - Information Lifecycle Management (ILM) has been around for decades, but Oracle has made a push to codify the approach in 11g.

File Group Repository - Oracle introduced an exciting new feature in 10gr2 dubbed the Oracle File Group Repository (FGR). The FGR allows the DBA to define a logically-related group of files and build a version control infrastructure. The working of the Oracle file group repository were created to support Oracle Streams, and they mimic the functionality of an IBM mainframe generation data group (GDG), in that you can specify relative incarnations of the file sets (e.g. generation 0, generation -3).

Interval partitioning for tables - This is a new 11g partitioning scheme that automatically creates time-based partitions as new data is added. You can now partition by date, one partition per month for example, with automatic partition creation.

New load balancing utilities -There are several new load balancing utilities in 11g.

Web server load balancing - The web cache component includes Apache extension to load-balance transactions to the least-highly-loaded Oracle HTTP server (OHS).

RAC instance load balancing - Staring in Oracle 10g release 2, Oracle JDBC and ODP.NET provide connection pool load balancing facilities through integration with the new “load balancing advisory” tool. This replaces the more-cumbersome listener-based load balancing technique.

Automated Storage Load balancing - Oracle’s Automatic Storage Management (ASM) now enables a single storage pool to be shared by multiple databases for optimal load balancing. Shared disk storage resources can alternatively be assigned to individual databases and easily moved from one database to another as processing requirements change.

Data Guard Load Balancing – Oracle Data Guard allows for load balancing between standby databases.

Listener Load Balancing - If advanced features such as load balancing and automatic failover are desired, there are optional sections of the listener.ora file that must be present

New table Data Type "simple_integer" - A new 11g datatype dubbed simple_integer is introduced. The simple_integer data type is always NOT NULL, wraps instead of overflows and is faster than PLS_INTEGER.

Improved table/index compression - Segment compression now works for all DML, not just direct-path loads, so you can create tables compressed and use them for regular OLTP work. Also supports column add/drop.

Faster DML triggers - DML triggers are up to 25% faster. This especially impacts row level triggers doing updates against other tables (think Audit trigger).

Server-side connection pooling - In 11g server-side connection pooling, an additional layer to the shared server, to enable faster [actually to bypass] session creation. Server-side connection pooling allows multiple Oracle clients to share a server-side pool of sessions (USERIDs must match). Clients can connect and disconnect (think PHP applications) at will without the cost of creating a new server session - shared server removes the process creation cost but not the session creation cost.

RMAN UNDO bypass - RMAN backup can bypass undo. Undo tablespaces are getting huge, but contain lots of useless information. Now RMAN can bypass those types of tablespace. Great for exporting a tablespace from backup.

Capture/replay database workloads - Sounds appealing. You can capture the workload in prod and apply it in development. Oracle is moving toward more workload-based optimization, adjusting SQL execution plans based on existing server-side stress. This can be very useful for Oracle regression testing.

Scalability Enhancements - The features in 11g focused on scalability and performance can be grouped into four areas: Scalable execution, scalable storage, scalable availability and scalable management.

Virtual columns - Oracle 11g virtual table columns are columns that are actually functions ("create table t1 (c1 number, c2 number, c3 as (c1+c2) virtual"), and similarly, virtual indexes that are based on functions.

REF partitioning - The 11g REF partitioning allows you to partition a table based on the values of columns within other tables.

A "super" object-oriented DDL keyword - This is used with OO Oracle when instantiating a derivative type (overloading), to refer to the superclass from whence the class was derived.

Oracle 11g XML data storage - Starting in 11g, you can store XML either as a CLOB or a binary data type, adding flexibility. Oracle11g will support query mechanisms for XML including XQuery and SQL XML, emerging standards for querying XML data stored inside tables.

New Trigger features - A new type of "compound" trigger will have sections for BEFORE, ROW and AFTER processing, very helpful for avoiding errors, and maintaining states between each section.

Partitioning - partitioning by logical object and automated partition creation.

LOB's - New high-performance LOB features.

Automatic Diagnostic Repository (ADR) - When critical errors are detected, they automatically create an “incident”. Information relating to the incident is automatically captured, the DBA is notified and certain health checks are run automatically.

Health Monitor (HM) utility - The Health Monitor utility is an automation of the dbms_repair corruption detection utility. When a corruption-like problem happens, the HM utility checks for possible corruption within database blocks, redo log blocks, undo segments, or dictionary table blocks.

Incident Packaging Service (IPS) - This wraps up all information about an incident, requests further tests and information if necessary, and allows you to send the whole package to Oracle Support.

Feature Based Patching - All one-off patches will be classified as to which feature they affect. This allows you to easily identify which patches are necessary for the features you are using. EM will allow you to subscribe to a feature based patching service, so EM automatically scans for available patches for the features you are using.

New Oracle11g Advisors - New 11g Oracle Streams Performance Advisor and Partitioning Advisor.

Table trigger firing order - Oracle 11g PL/SQL will you to specify trigger firing order.

Invisible indexes - Rich Niemiec claims that the new 11g "invisible indexes" are a great new feature. It appears that the invisible indexes will still exist, that they can just be marked as "invisible" so that they cannot be considered by the SQL optimizer. With the overhead of maintaining the index intact.

-------------Oracle11g High Availability & RAC new features-----------------

Oracle continues to enhance Real Application Clusters in Oracle11g and we see some exciting new features in RAC manageability and enhanced performance:

Oracle 11g RAC parallel upgrades - Oracle 11g promises to have a rolling upgrade features whereby RAC database can be upgraded without any downtime.

Oracle RAC load balancing advisor - Starting in 10gr2 we see a RAC load balancing advisor utility. Oracle says that the 11g RAC load balancing advisor is only available with clients which use .NET, ODBC, or the Oracle Call Interface (OCI).

ADDM for RAC - Oracle will incorporate RAC into the automatic database diagnostic monitor, for cross-node advisories.

Interval Partitioning - Robert Freeman notes that 11g "interval Partitioning makes it easier to manage partitions:

you want to partition every month and it would create the partitions for you? That is exactly what interval partitioning does. Here is an example:

create table selling_stuff_daily

( prod_id number not null, cust_id number not null

, sale_dt date not null, qty_sold number(3) not null

, unit_sale_pr number(10,2) not null

, total_sale_pr number(10,2) not null

, total_disc number(10,2) not null)

partition by range (sale_dt)

interval (numtoyminterval(1,'MONTH'))

( partition p_before_1_jan_2007 values

less than (to_date('01-01-2007','dd-mm-yyyy')));

Note the interval keyword. This defines the interval that you want each partition to represent. In this case, Oracle will create the next partition for dates less than 02-01-2007 when the first record that belongs in that partition is created."

Optimized RAC cache fusion protocols - moves on from the general cache fusion protocols in 10g to deal with specific scenarios where the protocols could be further optimized.

Oracle 11g RAC Grid provisioning - The Oracle grid control provisioning pack allows you to "blow-out" a RAC node without the time-consuming install, using a pre-installed "footprint".

Data Guard - Standby snapshot - The new standby snapshot feature allows you to encapsulate a snapshot for regression testing. You can collect a standby snapshot and move it into your QA database, ensuring that your regression test uses real production data.

Quick Fault Resolution - Automatic capture of diagnostics (dumps) for a fault.

-----------------Oracle 11g programming language support New Features------------

PHP - Improved PHP driver for Oracle.

Compilers - Improved native Java & PL/SQL compilers.

Oracle 11g XML Enhancements - Oracle 11g will also support Content Repository API for Java Technology (JSR 170). Oracle 11g has XML "duality", meaning that you can also embed XML directives inside PL/SQL and embed PL/SQL inside XML code. Oracle 11g XML will also support schema-based document Type Definitions (DTD's), to describe internal structure of the XML document.

Scalable Java - The next scalable execution feature is automatic creation of "native" Java code, with just one parameter for each type with an "on/off" value. This apparently provides a 100% performance boost for pure Java code, and a 10%-30% boost for code containing SQL.

Improved sequence management - A new features of Oracle 11g will bypass DML (sequence.nextval) and allow normal assignments on sequence values.

Intra-unit inlining. In C, you can write a macro that gets inlined when called. Now any stored procedure is eligible for inlining if Oracle thinks it will improve performance. No change to your code is required.

-----------------Oracle 11g PL/SQL New Features-------------------------

PL/SQL "continue" keyword - This will allow a "C-Like" continue in a loop, skipping an iteration to bypass any "else" Boolean conditions. A nasty PL/SQL GOTO statement is no longer required to exit a Boolean within a loop. Oracle professional Jurgen Kemmelings has an excellent PL/SQL example of the PL/SQL continue clause in-action:

begin

for i in 1..3

loop

dbms_output.put_line('i='||to_char(i));

if ( i = 2 )

then

continue;

end if;

dbms_output.put_line('Only if i is not equal to 2');

end loop;

end;

Disabled state for PL/SQL - Another 11g new feature is a "disabled" state for PL/SQL (as opposed to "enabled" and "invalid" in dba_objects).

Easy PL/SQL compiling - Native Compilation no longer requires a C compiler to compile your PL/SQL. Your code goes directly to a shared library.

Improved PL/SQL stored procedure invalidation mechanism - A new 11g features will be fine grained dependency tracking, reducing the number of objects which become invalid as a result of DDL.

Scalable PL/SQL - The next scalable execution feature is automatic creation of "native" PL/SQL (and Java code), with just one parameter for each type with an "on/off" value. This apparently provides a 100% performance boost for pure PL/SQL code, and a 10%-30% performance boost for code containing SQL.

Enhanced PL/SQL warnings - The 11g PL/SQL compiler will issue a warning for a "when others" with no raise.

Stored Procedure named notation - Named notation is now supported when calling a stored procedure from SQL.

--------------Oracle 11g SQL New Features-------------------------

New "pivot" SQL clause - The new "pivot" SQL clause will allow quick rollup, similar to an MS-Excel pivot table, where you can display multiple rows on one column with SQL. MS SQL Server 2005 also introduced a pivot clause.

The /*+result_cache*/ SQL hint - This suggests that the result data will be cached in the data buffers, and not the intermediate data blocks that were accessed to obtain the query results. You can cache SQL and PL/SQL results for super-fast subsequent retrieval. The "result cache" ties into the "scalable execution" concept. There are three areas of the result cache:

The SQL query result cache - This is an area of SGA memory for storing query results.

The PL/SQL function result cache - This result cache can store the results from a PL/SQL function call.

The OCI client result cache - This cache retains results from OCI calls, both for SQL queries or PL/SQL functions.

Scalable Execution - This 11g feature consists of a number of features, the first of which is query results caching; this feature automatically caches the results of an SQL query as opposed to the data blocks normally cached by the buffer cache, and works both client (OCI) and server side - this was described as "buffer cache taken to the next level". The DBA sets the size of the results cache and turns the feature on at a table level with the command "alter table DEPT cache results", the per-process cache is shared across multiple session and at the client level, is available with all 11g OCI-based clients.

XML SQL queries - Oracle11g will support query mechanisms for XML including XQuery and SQL XML, emerging standards for querying XML data stored inside tables.

SQL Replay - Similar to the previous feature, but this only captures and applies the SQL workload, not total workload.

Improved optimizer statistics collection speed - Oracle 11g has improved the dbms_stats performance, allowing for an order of magnitude faster CBO statistics creation. Oracle 11g has also separated-out the "gather" and "publish" operations, allowing CBO statistics to be retained for later use. Also, Oracle 11g introduces multi-column statistics to give the CBO the ability to more accurately select rows when the WHERE clause contains multi-column conditions or joins.

SQL execution Plan Management - Oracle 11g SQL will allow you to fix execution plans (explain plan) for specific statements, regardless of statistics or database version changes.

Dynamic SQL. DBMS_SQL is here to stay. It's faster and is being enhanced. DBMS_SQL and NDS can now accept CLOBs (no more 32k limit on NDS). A ref cursor can become a DBMS_SQL cursor and vice versa. DBMS_SQL now supprts user defined types and bulk operations.

SQL Performance Advisor - You can tell 11g to automatically apply SQL profiles for statements where the suggested profile give 3-times better performance that the existing statement. The performance comparisons are done by a new administrative task during a user-specified maintenance window.

Improved SQL Access Advisor - The 11g SQL Access Advisor gives partitioning advice, including advice on the new interval partitioning. Interval partitioning is an automated version of range partitioning, where new equally-sized partitions are automatically created when needed. Both range and interval partitions can exist for a single table, and range partitioned tables can be converted to interval partitioned tables.

Automatic Memory Tuning - Automatic PGA tuning was introduced in Oracle 9i. Automatic SGA tuning was introduced in Oracle 10g. In 11g, all memory can be tuned automatically by setting one parameter. You literally tell Oracle how much memory it has and it determines how much to use for PGA, SGA and OS Processes. Maximum and minimum thresholds can be set. This is controlled by the Oracle 11g memory_target parameter.

Resource Manager - The 11g Resource Manager can manage I/O, not just CPU. You can set the priority associated with specific files, file types or ASM disk groups.

ADDM - The ADDM in 11g can give advice on the whole RAC (database level), not just at the instance level. Directives have been added to ADDM so it can ignore issues you are not concerned about. For example, if you know you need more memory and are sick of being told it, you can ask ADDM not to report those messages anymore.

Faster sorting - Starting in 10gr2 we see an improved sort algorithm, “Oracle10gRw introduced a new sort algorithm which is using less memory and CPU resources. A hidden parameter _newsort_enabled = {TRUE|FALSE} governs whether the new sort algorithm will be used.”

AWR Baselines - The AWR baselines of 10g have been extended to allow automatic creation of baselines for use in other features. A rolling week baseline is created by default.

Adaptive Metric Baselines - Notification thresholds in 10g were based on a fixed point. In 11g, notification thresholds can be associated with a baseline, so the notification thresholds vary throughout the day in line with the baseline.

Enhanced Password - Oracle 11g will have case sensitive passwords and also the password algorithm has changed to SHA-1 instead of the old DES based hashing used."

Oracle SecureFiles - replacement for LOBs that are faster than Unix files to read/write. Lots of potential benefit for OLAP analytic workspaces, as the LOBs used to hold AWs have historically been slower to write to than the old Express .db files.

Oracle 11g audit vault - Oracle Audit Vault is a new feature that will provide a solution to help customers address the most difficult security problems remaining today, protecting against insider threat and meeting regulatory compliance requirements.

FGAC for UTL_SMTP, UTL_TCP and UTL_HTTP. You can define security on ports and URLs.

Fine Grained Dependency Tracking (FGDT). This means that when you add a column to a table, or a cursor to a package spec, you don't invalidate objects that are dependent on them.

Database Workload Replay - Oracle "Replay" allows the total database workload to be captured, transferred to a test database created from a backup or standby database, then replayed to test the affects of an upgrade or system change.

You specify the SQL tuning sets similar to the 10g offering and use the dbms_sqlpa package (SQL performance analyzer) to manage the SQL each "analyzer task" with dbms_sqlpa procedures (create_analysis_task, cancel_analysis_task, drop_analysis_task, reset_analysis_task, report_analysis_task, resume_analysis_task, interrupt_analysis_task).

Oracle 11g PLSQL Native Compilation

Machine code is sometimes called native code when referring to platform-dependent parts of language features or libraries.

Change parameter value,

SQL>alter system set plsql_code_type=native scope=spfile;

when use PLSQL_CODE_TYPE='NATIVE', arithmetic operations are done directly in the hardware which provides significantly better performance.

To compile a PL/SQL package to native code without setting plsql_code parameter,

ALTER PACKAGE <package_name> COMPILE PLSQL_CODE_TYPE=NATIVE;

To compile a PL/SQL procedure to native code without setting plsql_code parameter,

ALTER PROCEDURE <procedure_name> COMPILE PLSQL_CODE_TYPE = NATIVE;

Procedure to convert the entire database and recompile all PL/SQL modules into native mode

1) Shut down database

2) Edit spfile.ora and set PLSQL_CODE_TYPE =native and plsql_optimise_level=2

3) connect sys/password as sysdba

startup upgrade

4) @$ORACLE_HOME/rdbms/admin/dbmsupgnv.sql (which updates the execution mode of all PL/SQL modules to native) (You can use the TRUE command line parameter with the dbmsupgnv.sql script to exclude package specs from recompilation to NATIVE, saving time in the conversion process.)

5) shutdown immediate

startup

@$ORACLE_HOME/rdbms/admin/utlrp.sql (to recompile all invalid objects)

Thursday, December 29, 2011

Oracle Auditing

select name,value from v$parameter where name='audit_trail';
ALTER SYSTEM SET AUDIT_TRAIL=DB SCOPE=SPFILE;
Shutdown
startup
--


select * from dba_priv_audit_opts;
select * from dba_audit_session;
select * from dba_audit_trail;

select * from dba_stmt_audit_opts
union
select * from dba_priv_audit_opts;

select * from dba_audit_exists;
select * from dba_audit_object;
select * from dba_audit_session;
select * from dba_audit_statement;
select * from dba_audit_trail;
select * from dba_obj_audit_opts;
select * from dba_priv_audit_opts;
select * from dba_stmt_audit_opts;
----
audit all by KGUPTA2 by access;
noaudit all by KGUPTA2;

audit create session by access;
audit audit system by access;
audit grant any privilege by access;
audit grant any object privilege by access;
audit grant any role by access;
audit create user by access;
audit create any table by access;
audit create public database link by access;
audit create any procedure by access;
audit alter user by access;
audit alter any table by access;
audit alter any procedure by access;
audit alter database by access;
audit alter system by access;
audit alter profile by access;
audit drop user by access;
audit drop any procedure by access;
audit drop any table by access;
audit drop profile by access;

audit select table, insert table, update table, delete table by payroll by access;
--
Auditing user activity with the Oracle audit command

Oracle has syntax for auditing specific user activity. To audit the activity of user KGUPTA2 we could issue these audit commands:
Audit all Oracle user activity.

This audits everything including DDL (create table), DML (inserts, updates, deletes) and login/logoff events:

audit all by kGUPTA2 by access;

Audit all Oracle user viewing activity:

audit select table by KGUPTA2 by access;

Audit all Oracle user data change activity:

audit update table, delete table,insert table by KGUPTA2 by access;
Audit all Oracle user viewing activity:

audit execute procedure by KGUPTA2 by access;


AUDIT INSERT, UPDATE ON LDBO.ACCOUNTS by access;
AUDIT ALL ON LDBO.ACCOUNTS_SEQUENCE;

Setting Default Auditing Options: Example The following statement specifies default auditing options for objects created in the future:

AUDIT ALTER, GRANT, INSERT, UPDATE, DELETE ON DEFAULT;

Any objects created later are automatically audited with the specified options that apply to them, if auditing has been enabled:
If you create a table, then Oracle Database automatically audits any ALTER, GRANT, INSERT, UPDATE, or DELETE statements issued against the table.
If you create a view, then Oracle Database automatically audits any GRANT, INSERT, UPDATE, or DELETE statements issued against the view.
If you create a sequence, then Oracle Database automatically audits any ALTER or GRANT statements issued against the sequence.
If you create a procedure, package, or function, then Oracle Database automatically audits any ALTER or GRANT statements issued against it.

SEQUENCE--- ALTER,AUDIT,GRANT,SELECT
TABLE OR VIEW -- ALTER,AUDIT,COMMENT,DELETE,GRANT,INDEX,INSERT,LOCK,RENAME,SELECT,UPDATE

audit update table, delete table,insert table by FRED by access;
---------------------------

audit all on ldbo.tbllocktable;
noaudit select on ldbo.tbllocktable;


select obj_name, sessionid, username, ses_actions, timestamp from dba_audit_trail where obj_name='TBLLOCKTABLE';


you'll get a result like (columns have been shortened for readability):

OBJ_NAME SESSIONID USERNAME SES_ACTIONS TIMESTAMP
-------- ---------- -------- ------------------- ------------------
TBLLOCKTABLE 23242623 LDBO -S-------------- 10-JUL-10
TBLLOCKTABLE 23122413 UIPL6724 ---------S------ 10-JUL-10
TBLLOCKTABLE 23092613 USSB0256 ---------S------ 10-JUL-10
TBLLOCKTABLE 23242311 LDBO ---------S------ 10-JUL-10
TBLLOCKTABLE 23092651 UIPL6722 ---------S------ 10-JUL-10
TBLLOCKTABLE 23242678 LDBO -S-------------- 10-JUL-10
The TIMESTAMP column indicates the time of the first audited action within the session. The SES_ACTIONS column is a session summary—a string of 16 characters, one for each action type in the order ALTER, AUDIT, COMMENT, DELETE, GRANT, INDEX, INSERT, LOCK, RENAME, SELECT, UPDATE, REFERENCES, EXECUTE, READ. (Positions 15, and 16 are reserved for future use). The characters are: - for none, S for success, F for failure, and B for both.



select obj_name, sessionid, username, ses_actions, timestamp from dba_audit_trail where obj_name='TBLLOCKTABLE';


-S--------------
---------S------
---------S------
---------S------
---------S------
-S--------------
----------S-----
----------S-----


Database Hardening

Following are the general guidelines used for DB hardening:

Complete server hardening checklist. Ideally, run on latest supported version (or at least a supported version) of the Operating System.

Use the latest generation of database server.

Install the latest vendor-provided patches for the database. Be sure to include patches for database support software that isn’t directly bundled with the database.

Remove default usernames and passwords

Manually review installed stored procedures and delete those that aren't going to be used. In many cases, most or all stored procedures can be deleted.

Where possible, isolate sensitive databases to their own servers. Databases containing Personally Identifiable Information, or otherwise sensitive data should be
protected from the Internet by a network firewall, and administrative/DBA access should be limited to as few individuals as possible.

Ensure that application access to the database is limited to the minimal access necessary. For example, reporting applications that just require read-only access should be appropriately limited.

Manually validate that logging of successful and failed authentication attempts is working.

Use complex names for database users. Use especially complex passwords for these users.

Create alternative administrative users for each DBA, rather than allowing multiple individual users to regularly use the default administrative account.



Wednesday, December 28, 2011

ORA-01652 unable to extend temp segment by 64 in tablespace USR

-- Identify which sessions are currently holding sort (temp) segments,
-- to find the session behind an ORA-01652 in the named tablespace.
select
    srt.tablespace,
    srt.segfile#,
    srt.segblk#,
    srt.blocks,
    ses.sid,
    ses.serial#,
    ses.username,
    ses.osuser,
    ses.status
from v$sort_usage srt
inner join v$session ses
    on ses.saddr = srt.session_addr
order by
    srt.tablespace,
    srt.segfile#,
    srt.segblk#,
    srt.blocks;


select inst_id, tablespace_name, total_blocks, used_blocks, free_blocks from gv$sort_segment;


Cause: Failed to allocate an extent of the required number of blocks for a temporary segment in the tablespace indicated.

Action: Use ALTER TABLESPACE ADD DATAFILE statement to add one or more files to the tablespace indicated.




ALTER TABLESPACE usr
ADD DATAFILE 'F:\CAPD1112\USERS02.ORA' SIZE 2048M
AUTOEXTEND ON
NEXT 1024M
MAXSIZE unlimited;

ORA-1653: unable to extend table by 4096 in tablespace USR


[Microsoft][ODBC driver for Oracle][Oracle]ORA-20014: isincode INE683A01023Stock Details could not be Inserted. ~-1653~ORA-01653: unable to extend table LDBO.POWEROFATTORNEYSTOCKS by 4096 in tablespac

ORA-1653: unable to extend table LDBO.POWEROFATTORNEYSTOCKS by 64 in tablespace USR

ORA-1653: unable to extend table LDBO.POWEROFATTORNEYSTOCKS by 4096 in tablespace USR

ORA-01652: unable to extend temp segment by 64 in tablespace USR


Solution

Adding a datafile to the existing tablespace:


ALTER TABLESPACE usr
ADD DATAFILE 'F:\CAPD1112\USERS02.ORA' SIZE 2048M
AUTOEXTEND ON
NEXT 1024M
MAXSIZE unlimited;

Saturday, December 24, 2011

ORA-01114: IO error writing block to file 201 (block # 763489) ORA-27072: I/O error Linux Error: 28: No space left on device


Event
Insert data into table


Error

ERROR at line 25:
ORA-01114: IO error writing block to file 201 (block # 763489)
ORA-27072: File I/O error
Linux-x86_64 Error: 28: No space left on device
Additional information: 4
Additional information: 763489
Additional information: -1
ORA-01114: IO error writing block to file 201 (block # 763489)
ORA-27072: File I/O error
Linux-x86_64 Error: 28: No space left on device
Additional information: 4
Additional information: 763489
Additional information: -1


Cause:
The device has run out of space. This could happen because disk space of files is not necessarily allocated at file creation time.


Action:


1) Remove unnecessary files to free up space.

2) Temp file system is full you need to add more space to temp or relocate the temp file system to another file system with more space :

select bytes/1024/1024/1024 size_GB,autoextensible,increment_by from dba_temp_files;


select owner,tablespace_name,segment_type,segment_name from dba_extents where file_id = 201 and block_id = 763489;

select file_name, tablespace_name, file_id, status from dba_data_files where file_id= 201
union all
select file_name, tablespace_name, file_id, status from dba_temp_files where file_id= 201;


--------------------

1)
-- Create an interim temp tablespace so the current default can be dropped.
CREATE TEMPORARY TABLESPACE temp2
TEMPFILE 'E:\APEXD1112\TEMP02.ORA' SIZE 500M REUSE
AUTOEXTEND ON NEXT 100M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 100M;

2)
-- Switch the database default so new sorts land in the interim tablespace.
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temp2;

3)
-- Drop the old temp tablespace together with its tempfiles on disk.
DROP TABLESPACE temporary INCLUDING CONTENTS AND DATAFILES;

4)
-- Recreate the original tablespace with a fresh, empty tempfile.
CREATE TEMPORARY TABLESPACE temporary
TEMPFILE 'E:\APEXD1112\TEMP01.ORA' SIZE 500M REUSE
AUTOEXTEND ON NEXT 100M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 100M;

5)
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temporary;

6)
DROP TABLESPACE temp2 INCLUDING CONTENTS AND DATAFILES;
7)
-- Verify the rebuilt tablespace. Unquoted identifiers are stored in upper
-- case in the data dictionary, so compare against 'TEMPORARY' — the original
-- lower-case literal 'temporary' would match no rows.
SELECT tablespace_name, file_name, bytes
FROM dba_temp_files WHERE tablespace_name = 'TEMPORARY';

--------------------


Create / Clear Temporary tablespace


1)
-- Create an interim temp tablespace so the current default can be dropped.
CREATE TEMPORARY TABLESPACE temp2
TEMPFILE 'E:\SNSD1011\TEMP02.ORA' SIZE 5M REUSE
AUTOEXTEND ON NEXT 1M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 1M;

2)
-- Switch the database default so new sorts land in the interim tablespace.
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temp2;

3)
-- Drop the old temp tablespace together with its tempfiles on disk.
DROP TABLESPACE temporary INCLUDING CONTENTS AND DATAFILES;

4)
-- Recreate the original tablespace with a fresh, empty tempfile.
CREATE TEMPORARY TABLESPACE temporary
TEMPFILE 'E:\SNSD1011\TEMP01.ORA' SIZE 500M REUSE
AUTOEXTEND ON NEXT 100M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 1M;

5)
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temporary;

6)
DROP TABLESPACE temp2 INCLUDING CONTENTS AND DATAFILES;
7)
-- Verify the rebuilt tablespace. Unquoted identifiers are stored in upper
-- case in the data dictionary, so compare against 'TEMPORARY' — the original
-- lower-case literal 'temporary' would match no rows.
SELECT tablespace_name, file_name, bytes
FROM dba_temp_files WHERE tablespace_name = 'TEMPORARY';

TNS-12518: TNS:listener could not hand off client connection TNS-12560 TNS-00530 32-bit Windows Error: 2: No such file or directory

Event: When more users are making connection, Connection is breaking

Error:

TNS-12518: TNS:listener could not hand off client connection
TNS-12560: TNS:protocol adapter error
TNS-00530: Protocol adapter error
32-bit Windows Error: 2: No such file or directory


RCA:

1) check listener.log size if in GBs then stop listener services and rename it and start listener services

2) check sga size of all database on that server and compare with server RAM

3) check virtual memory and set it to recommended by windows.

4) Restart server to apply virtual memory settings.


Friday, December 23, 2011

Transparent Data Encryption (TDE)




seLect * from dict where table_name like '%WALLET%';

seLect * from dict where table_name like '%ENCRYPT%';


CREATE TABLE ENCRYPT_TEMP (FIRST VARCHAR2(20),SECOND VARCHAR2(20));

SQLNET.ORA

ENCRYPTION_WALLET_LOCATION =
(SOURCE=
(METHOD=file)
(METHOD_DATA=
(DIRECTORY=D:\ORACLE\PRODUCT\10.2.0\ADMIN\NBS1112\WALLET)))

CREATE WALLET DIRECTORY ON D:\ORACLE\PRODUCT\10.2.0\ADMIN\NBS1112

Create a Secure Wallet to hold the Master Encryption Key:

alter system set encryption key authenticated by "ImOracle";




ALTER TABLE ENCRYPT_TEMP MODIFY(FIRST ENCRYPT);

ALTER TABLE ENCRYPT_TEMP MODIFY (first DECRYPT); ---to decrypt


ALTER SYSTEM SET WALLET CLOSE;



SQL> SELECT * FROM ENCRYPT_TEMP;
SELECT * FROM ENCRYPT_TEMP
*
ERROR at line 1:
ORA-28365: wallet is not open




SQL> alter system set encryption key authenticated by "ImOracle";

System altered.

SQL> SELECT * FROM ENCRYPT_TEMP;

FIRST SECOND
-------------------- --------------------
AB1 ZX2
AB1 ZX2
AB1 ZX233

SQL>


SQL> ALTER SYSTEM SET WALLET CLOSE;

System altered.

SQL> SELECT SECOND FROM ENCRYPT_TEMP;

SECOND
--------------------
ZX2
ZX2
ZX233

Thursday, December 22, 2011

Find and Delete duplicate check Constraints

--------------------------------------------------------------------------------
--------------------------------------------------------------------------------

-- Find duplicate check constraints in current user's schema that have Oracle
-- generated names (like 'SYS_C%'). A script (c:\drop_duplicate_constraints.sql)
-- is generated to drop those duplicates.
--
-- This removes those duplicated constraints that were probably created by
-- improper usage of the Oracle imp utility. Using CONSTRAINTS=N will not
-- create duplicate constraints when importing into an existing table.
--
-- Tables js_constraints and js_constraints_min are dropped and created in
-- the current user's schema; drop them afterwards to clean up.
--
-- WARNING: Review this and the generated script
-- c:\drop_duplicate_constraints.sql before running it at YOUR RISK.
-- I do not accept any responsibility for what YOU DO TO YOUR DATABASES !
--
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
set linesize 120
set pagesize 50000
set timing on

-- Working table holding every Oracle-generated (SYS_C%) check constraint
-- in the current schema; populated by the PL/SQL block below.
drop table js_constraints
/
create table js_constraints
(
owner varchar2(30),
constraint_name varchar2(30),
table_name varchar2(30),
search_condition varchar2(2000)  -- first 2000 chars of the LONG search_condition
)
/
-- Copy all system-named (SYS_C%) check constraints of the current user into
-- js_constraints. A PL/SQL cursor loop is used instead of a single
-- INSERT ... SELECT because user_constraints.search_condition is a LONG
-- column: SUBSTR on a LONG only works on a PL/SQL variable, not directly in
-- a SQL statement.
declare
cursor js_cursor is
select
owner,
table_name,
constraint_name,
search_condition
from user_constraints
where owner = user
and constraint_type = 'C'           -- check constraints only
and constraint_name like 'SYS_C%';  -- Oracle-generated names only
js_rec js_cursor%rowtype;
commit_interval constant integer := 10000;  -- commit every N inserted rows
records_processed integer := 0;
begin
open js_cursor;
loop
fetch js_cursor into js_rec;
exit when js_cursor%notfound;
insert into js_constraints (
owner,
table_name,
constraint_name,
search_condition)
values (
js_rec.owner,
js_rec.table_name,
js_rec.constraint_name,
substr(js_rec.search_condition,1,2000)  -- truncate LONG to fit VARCHAR2(2000)
);
records_processed := records_processed + 1;
if records_processed = commit_interval then
commit;
records_processed := 0;
end if;
end loop;
commit;  -- flush the final partial batch
close js_cursor;
end;
/
-- Index to speed up the correlated lookups and the final DELETE below.
create index js_constraints_x1 on js_constraints (owner, table_name, search_condition)
/
drop table js_constraints_min
/
-- One row per (owner, table, condition) group: the "survivor" constraint.
-- translate(..., 'SYC_', '9999') maps each of the fixed prefix characters
-- S, Y, C and _ to '9', so SYS_C names compare by their numeric suffix and
-- MIN() picks the lowest-numbered constraint of each duplicate group.
create table js_constraints_min as
select
owner,
table_name,
search_condition,
min(translate(constraint_name, 'SYC_', '9999')) as min_constraint_name,
'at least 30 dummy characters !!!' as constraint_name  -- placeholder literal, wide enough (>30 chars) for any real name
from js_constraints
group by owner, table_name, search_condition
/
-- Replace the placeholder with the survivor's actual constraint name.
update js_constraints_min cm
set constraint_name = (select constraint_name from js_constraints c
where c.owner = cm.owner
and c.table_name = cm.table_name
and translate(c.constraint_name, 'SYC_', '9999') = cm.min_constraint_name)
/
-- Remove the survivors from js_constraints, leaving only the duplicates
-- that the generated script should drop.
delete from js_constraints
where (owner, table_name, search_condition, translate(constraint_name, 'SYC_', '9999')) in
(select owner, table_name, search_condition, min_constraint_name
from js_constraints_min)
/


-- Generate the drop script for the duplicate constraints. The spool command
-- was commented out in the original while the closing "spool off" remained,
-- so the generated file was never written; the spool is re-enabled here.
spool c:\js_drop_duplicate_constraints.sql
select
'alter table ' || c.owner || '.' || c.table_name ||
' drop constraint ' || c.constraint_name ||
' --duplicate of ' || cm.constraint_name || chr(13) || '/'
from js_constraints c
inner join js_constraints_min cm
on c.owner = cm.owner
and c.table_name = cm.table_name
and c.search_condition = cm.search_condition
order by c.owner, c.table_name, cm.min_constraint_name, c.constraint_name
/
spool off

Oracle Connection taking long time to establish / tnsping taking too long

1) Check listener.log size
At 10g, D:\oracle\product\10.2.0\db_1\NETWORK\log
At 11g, D:\app\Administrator\diag\tnslsnr\apex11-PC\listener\trace

2) If listener.log size is in GBs then
a) Stop listener services
b) Rename listener.log
c) Start listener services


lsnrctl>show

lsnrctl>set Log_status off

rename Listener.log to listenerold.log (from the command prompt)

lsnrctl>set Log_Status on

it will start listener.log automatically



3) Tnsping



ping
tracert
tnsping

Running SQL query


------------old

set heading off

-- Most recently loaded SQL executed by LDBO sessions from ld.exe,
-- one v$sqltext piece per row, newest statements first.
select aa from (
    select distinct
        txt.sql_id,
        txt.piece,
        txt.sql_text aa,
        area.first_load_time
    from v$sqltext txt
    inner join v$sqlarea area
        on area.sql_id = txt.sql_id
    inner join v$session ses
        on  ses.command        = area.command_type
        and ses.username       = area.parsing_schema_name
        and ses.sql_hash_value = area.hash_value
        and ses.sql_address    = area.address
    where ses.username is not null
      and area.parsing_schema_name = 'LDBO'
      and ses.module = 'ld.exe'
    order by area.first_load_time desc, txt.sql_id, txt.piece
);




-------------------sqlplus

set heading off

-- Same report as above, but returns the full statement text by converting
-- v$sqlarea.SQL_FULLTEXT (a CLOB) with to_char, for use in SQL*Plus.
select aa from (
select a.sql_id,a.piece,to_char(b.SQL_FULLTEXT) aa,b.first_load_time from v$sqltext a,v$sqlarea b,v$session c where
a.sql_id=b.sql_id
AND c.command = b.command_type
And C.Username = B.Parsing_Schema_Name
and c.sql_hash_value = b.hash_value
and c.sql_address = b.address
and c.username is not null
And B.Parsing_Schema_Name='LDBO'
and c.module='ld.exe'
order by first_load_time desc,sql_id,piece);



------------full sqltext--------at plsql---- sqldeveloper-----

select aa from (
select a.sql_id,a.piece,to_char(b.SQL_FULLTEXT) aa,b.first_load_time from v$sqltext a,v$sqlarea b,v$session c where
a.sql_id=b.sql_id
AND c.command = b.command_type
And C.Username = B.Parsing_Schema_Name
and c.sql_hash_value = b.hash_value
and c.sql_address = b.address
and c.username is not null
And B.Parsing_Schema_Name='LDBO'
and c.module='ld.exe'
order by first_load_time desc,sql_id,piece);


----------------------------sqltext with operation-----------

-- Full SQL text joined to its execution-plan rows for SQL currently being
-- run by LDBO sessions from ld.exe, ordered plan by plan.
SELECT
    sa.sql_fulltext,
    sp.id,
    sp.parent_id,
    sp.operation,
    sp.object_name
FROM v$sqlarea sa
INNER JOIN v$sql_plan sp
    ON  sp.address         = sa.address
    AND sp.hash_value      = sa.hash_value
    AND sp.plan_hash_value = sa.plan_hash_value
INNER JOIN v$session ses
    ON  ses.username       = sa.parsing_schema_name
    AND ses.sql_hash_value = sa.hash_value
WHERE sa.parsing_schema_name = 'LDBO'
  AND ses.module = 'ld.exe'
ORDER BY sa.plan_hash_value, sp.id;



Microsoft ODBC driver for Oracle on 64 bit Machine

oracle(tm) client and networking components were not found. these components are supplied by oracle corporation and are part of the oracle version 7.2 (or greater) client software installation

You will be unable to use the driver until these software components are installed.


The Microsoft ODBC Driver can be installed under 64-bit Windows but 64-bit applications cannot access MS ODBC driver because it comes only in 32-bit version. For 32-bit applications under 64-bit Windows there's ODBC Data Source Administrator for the 32-bit ODBC drivers %systemdrive%\Windows\SysWoW64\odbcad32.exe (usually C:\WINDOWS\SysWOW64\odbcad32.exe).


They put the 32 bit odbcad32.exe in the syswow64 directory. They put the 64 bit odbcad32.exe in the system32 directory. 32 bit apps will pick up the 32 bit registry setting and 64 bit will pick up the 64 bit registry setting. system32 comes before syswow64 in the system path so the 64bit software runs before the 32 bit software.


Install Oracle Server 10.2.0.4

Solution: also install Oracle client version 10.2.0.3 or above to get the required driver.



Table Defragmentation / Table Reorganization / Table Rebuilding

Tables in an Oracle database become fragmented after mass deletions, or after many delete and/or insert operations. If you are running a 24x7 database, you don't have the option of reorganizing (defragmenting) a table by the traditional export/truncate/import method, i.e., exporting data from the affected table, truncating the table, then importing the data back.
There is an "alter table table_name move" command that you can use to defragment tables.
Note: This method does not apply to tables with 'LONG' columns.



--------detecting chained row-----

SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;


List Chained Rows

Creating a CHAINED_ROWS Table

@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlchain.sql will create following table


-- Target table for ANALYZE TABLE ... LIST CHAINED ROWS INTO; same shape as
-- the table created by $ORACLE_HOME/rdbms/admin/utlchain.sql.
create table CHAINED_ROWS (
owner_name varchar2(30),
table_name varchar2(30),
cluster_name varchar2(30),
partition_name varchar2(30),
subpartition_name varchar2(30),
head_rowid rowid,          -- rowid of the head piece of each chained row
analyze_timestamp date
);


SELECT owner_name,table_name, head_rowid FROM chained_rows;


-------------------


-- Count rows per data block for the analyzed table. The original statement
-- was missing the table name after FROM entirely (a syntax error); the demo
-- table used later in this note is supplied here — substitute your own.
SELECT dbms_rowid.rowid_block_number(rowid) "Block-Nr", count(*) "Rows"
FROM row_mig_chain_demo
GROUP BY dbms_rowid.rowid_block_number(rowid) order by 1;


SELECT chain_cnt,
round(chain_cnt/num_rows*100,2) pct_chained,
avg_row_len, pct_free , pct_used
FROM user_tables
WHERE TABLE_NAME IN (
SELECT distinct table_name FROM CHAINED_ROWS);



If the table includes LOB column(s), this statement can be used to move the table along with the LOB data and LOB index segments (associated with this table) which the user explicitly specifies. If not specified, the default is not to move the LOB data and LOB index segments.




---------------------------Detect all Tables with Chained and Migrated Rows------------------------


1) Analyze all or only your Tables


SELECT 'ANALYZE TABLE '||table_name||' LIST CHAINED ROWS INTO CHAINED_ROWS;' FROM user_tables;


Analyze only chained rows tables

SELECT owner, table_name, chain_cnt FROM dba_tables WHERE owner='LDBO' and chain_cnt > 0;


-- Spool a script of ANALYZE ... LIST CHAINED ROWS commands, one per LDBO
-- table that already shows chained rows in its statistics.
set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\chained_statistics.sql;

SELECT 'ANALYZE TABLE ' ||table_name||' LIST CHAINED ROWS INTO CHAINED_ROWS;'
FROM dba_tables WHERE owner='LDBO' and chain_cnt > 0;

spool off



2) Alter Table ......Move

set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\defrag.sql;

-- Spool one "ALTER TABLE ... MOVE;" per table listed in CHAINED_ROWS.
-- The original concatenation was incomplete ("...||table_name|| FROM"),
-- which is a syntax error; it is terminated here with the MOVE clause.
SELECT DISTINCT 'ALTER TABLE ' ||table_name||' MOVE;' FROM CHAINED_ROWS;
spool off


or

-- Size check: total MB currently used by the segment, to choose a sensible
-- INITIAL extent for the rebuilt table.
select sum(bytes/1024/1024) "FOR INITIAL VALUE OR MORE"
from dba_segments
where owner = 'LDBO'
and segment_name = 'TBLOPTIONACCESSHISTORY';


-- Spool MOVE statements with explicit storage parameters. The original
-- statement was split by blank lines between FROM and its table name,
-- which breaks SQL*Plus statement parsing; it is rejoined here.
SELECT DISTINCT 'ALTER TABLE ' ||table_name||' MOVE PCTFREE 20 PCTUSED 40 STORAGE (INITIAL 20K NEXT 40K MINEXTENTS 2 MAXEXTENTS 20 PCTINCREASE 0);' FROM CHAINED_ROWS;


3) Rebuild Indexes because these tables’s indexes are in unstable state.

-- Spool ALTER INDEX ... REBUILD for every index on a table that had chained
-- rows, since moving a table invalidates its indexes.
-- NOTE(review): connect string with a literal password belongs in a secure
-- config, not in a script — replace before use.
connect deltek/xxx@fin;
set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\rebuild_index.sql;

SELECT 'ALTER INDEX ' ||INDEX_NAME||' REBUILD;' FROM DBA_INDEXES WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);
spool off

4) Analyze Tables for compute statistics after defragmentation

-- Spool ANALYZE ... COMPUTE STATISTICS for every moved table so the
-- optimizer statistics reflect the defragmented segments.
set heading off;
set feedback off;
set pagesize 1000;
spool C:\temp\compute_stat.sql;

SELECT 'ANALYZE TABLE '||table_name||' COMPUTE STATISTICS;' FROM user_tables WHERE TABLE_NAME IN ( SELECT distinct table_name FROM CHAINED_ROWS);

spool off


5) Show the RowIDs for all chained rows

This will allow you to quickly see how much of a problem chaining is in each table. If chaining is prevalent in a table, then that table should be rebuilt with a higher value for PCTFREE.

-- Per-table count of the chained/migrated rows recorded in CHAINED_ROWS.
SELECT
    owner_name,
    table_name,
    COUNT(head_rowid) row_count
FROM chained_rows
GROUP BY
    owner_name,
    table_name
/



6) SELECT owner, table_name, chain_cnt FROM dba_tables WHERE chain_cnt > 0;

-- Rows per data block for the demo table, to visualise how the rows are
-- spread after the rebuild.
SELECT dbms_rowid.rowid_block_number(rowid) "Block-Nr", count(*) "Rows"
FROM row_mig_chain_demo
GROUP BY dbms_rowid.rowid_block_number(rowid) order by 1;


-- Clear the working table so the next analysis starts clean.
delete FROM chained_rows;

Followers