Saturday, March 24, 2012

PGA Top Consumer

set pagesize 1000
set lines 100
col sid format 9999
col username format a12
col module format a30
column pga_memory_mb format 9,999.99 heading "PGA MB"
column max_pga_memory_mb format 9,999.99 heading "PGA MAX|MB"
col service_name format a20
col sql_text format a70 heading "Currently executing SQL"
set echo on

WITH pga AS
(SELECT sid,
ROUND(SUM(CASE name WHEN 'session pga memory'
THEN VALUE / 1048576 END),2) pga_memory_mb,
ROUND(SUM(CASE name WHEN 'session pga memory max'
THEN VALUE / 1048576 END),2) max_pga_memory_mb
FROM v$sesstat
JOIN v$statname USING (statistic#)
WHERE name IN ('session pga memory','session pga memory max' )
GROUP BY sid)
SELECT sid, username,s.module,
pga_memory_mb,
max_pga_memory_mb, substr(sql_text,1,70) sql_text
FROM v$session s
JOIN (SELECT sid, pga_memory_mb, max_pga_memory_mb,
RANK() OVER (ORDER BY pga_memory_mb DESC) pga_ranking
FROM pga)
USING (sid)
LEFT OUTER JOIN v$sql sql
ON (s.sql_id=sql.sql_id and s.sql_child_number=sql.child_number)
WHERE pga_ranking <=5
ORDER BY pga_ranking
/

PGA Increase: When???

Whenever the value of the v$sysstat statistic 'estimated PGA memory for one-pass' exceeds pga_aggregate_target, you'll want to increase pga_aggregate_target.


select name,value/1024/1024 from v$sysstat where name like '%pga%';

select value/1024/1024 from v$parameter where name like '%pga%';
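
Before resizing, the advisory view v$pga_target_advice (populated when workarea_size_policy = AUTO) is worth a look; rows where estd_overalloc_count is 0 are candidate targets:

select round(pga_target_for_estimate/1024/1024) target_mb,
estd_pga_cache_hit_percentage,
estd_overalloc_count
from v$pga_target_advice
order by pga_target_for_estimate;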




Whenever the value of the v$sysstat statistic 'workarea executions - multipass' is greater than 1 percent of all work-area executions, the database may benefit from additional memory.

select name,value from v$sysstat where name = 'workarea executions - multipass';




PGA memory can be overallocated: consider reducing the value of pga_aggregate_target whenever the v$sysstat statistic 'workarea executions - optimal' consistently accounts for 100 percent of work-area executions.

select name,value from v$sysstat where name = 'workarea executions - optimal';
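
To see all three guidelines on one screen, the following sketch computes each work-area execution mode's share of the total (ratio_to_report does the percentage math):

select name, value,
round(100 * ratio_to_report(value) over (), 2) pct
from v$sysstat
where name like 'workarea executions%';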




--------------------------------------------

Oracle Hidden Parameter

SELECT
n.ksppinm AS "name",
v.ksppstvl AS "value",
n.ksppdesc AS "description"
FROM
x$ksppi n,
x$ksppsv v
WHERE 1 = 1
AND n.indx = v.indx
ORDER BY
1;
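
As written, this lists every parameter, documented and hidden. To see only the hidden (underscore-prefixed) ones, add a filter on the name; note that x$ views are only visible when connected as SYS:

SELECT
n.ksppinm AS "name",
v.ksppstvl AS "value",
n.ksppdesc AS "description"
FROM
x$ksppi n,
x$ksppsv v
WHERE 1 = 1
AND n.indx = v.indx
AND n.ksppinm LIKE '\_%' ESCAPE '\'
ORDER BY
1;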

Oracle 10g Scheduler Job Email Notification

1) Connect as SYS (as sysdba) and run two scripts to install and configure the UTL_MAIL package

SQL> conn sys@apx1213 as sysdba
Enter password: ******
Connected.
SQL> @d:\oracle\product\10.2.0\db_1\rdbms\admin\utlmail.sql

Package created.


Synonym created.

SQL> @d:\oracle\product\10.2.0\db_1\rdbms\admin\prvtmail.plb;

Package body created.

No errors.

2) Set the SMTP_OUT_SERVER parameter to your SMTP/Exchange server. This parameter is not dynamically modifiable, which means we have to bounce the database for it to take effect

SQL> alter system set smtp_out_server = 'mail.apexsoftcell.com' scope=spfile;

SQL> shutdown immediate;
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup

3) Grant EXECUTE privileges to the user that will use the UTL_MAIL package.

SQL> grant execute on utl_mail to ldbo;

Grant succeeded.

4) Create a procedure for email notification

create or replace procedure PRC_EMAIL (pSubject IN VARCHAR2, pMessage IN VARCHAR2) is
BEGIN
utl_mail.send(sender => 'info@apexsoftcell.com', recipients => 'kshitij@apexsoftcell.com', subject => pSubject, message => pMessage);
END;
/
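
A quick manual test of the procedure (the sender/recipient are the ones hard-coded above):

exec PRC_EMAIL('Test Notification','Test message from UTL_MAIL');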

5) Create the scheduler job

BEGIN
DBMS_SCHEDULER.drop_JOB (job_name => 'compile');
END;
/


BEGIN
DBMS_SCHEDULER.create_job (
job_name => 'compile',
job_type => 'PLSQL_BLOCK',
job_action => 'DECLARE lnResult VARCHAR2(150);
BEGIN UTL_RECOMP.recomp_serial(''LDBO''); lnResult:=''SUCCESS'';
PRC_EMAIL(''Compile Notification'',lnResult);
EXCEPTION WHEN OTHERS THEN lnResult:=SUBSTR(SQLERRM,1,150);
PRC_EMAIL(''Compile Notification'',lnResult);
END;',
start_date => SYSTIMESTAMP,
repeat_interval => 'freq=hourly; byminute=0; bysecond=0;',
end_date => NULL,
enabled => TRUE,
comments => 'Compile job');
END;
/

6) Execute the job manually
exec DBMS_SCHEDULER.run_job ('compile');
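
To confirm the run and catch failures afterwards, check the scheduler run log (requires access to the DBA scheduler views):

select job_name, status, error#, log_date
from dba_scheduler_job_run_details
where job_name = 'COMPILE'
order by log_date desc;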

Thursday, March 15, 2012

Server Configuration Planning


1) Existing server configuration (Processor, No of CPU, RAM, Disk Capacity, … , …)

2) No. of running databases on server
3) Databases folder size of all years


select a.data_size+b.temp_size+c.redo_size+d.controlfile_size "DB_Folder_size_GB"
from ( select sum(bytes)/1024/1024/1024 data_size
from dba_data_files) a,
( select nvl(sum(bytes),0)/1024/1024/1024 temp_size
from dba_temp_files ) b,
( select sum(bytes)/1024/1024/1024 redo_size
from sys.v_$log ) c,
( select sum(BLOCK_SIZE*FILE_SIZE_BLKS)/1024/1024/1024 controlfile_size
from v$controlfile) d ;


4) Max concurrent connections in the database

Maximum concurrent connections (mcc) refers to the total number of sessions (connections) about which a device can maintain state simultaneously.

select highwater from dba_high_water_mark_statistics where name = 'SESSIONS';

(To estimate the memory those sessions consume, use the "Used SGA" and "pga requirement" queries further below.)

5) Connections Per Second
Connections per second (c/s) refers to the rate at which a device can establish state parameters for new connections.

6) Transactions Per Second
Transactions per second (t/s) refers to the number of complete actions of a particular type that can be performed per second.

7) Weekly or monthly growth of databases.

database_monitoring_script

8) Oracle core license??

9) Network Load (Bandwidth,.........)




-------------------------------
Memory in a data warehouse is particularly important for processing memory-intensive operations such as large sorts. Access to the data cache is less important in a data warehouse because most queries access vast amounts of data. Data warehouses therefore do not have memory requirements as critical as OLTP applications.

The number of CPUs provides you a good guideline for the amount of memory you need. Use the following simplified formula to derive the amount of memory you need from the CPUs you selected:

memory (GB) = 2 * (number of CPUs)

For example, a system with 6 CPUs needs 2 * 6 = 12 GB of memory. Most standard servers fulfill this requirement.

------------------------------

select * from dba_high_water_mark_statistics where name in ('SESSIONS','DB_SIZE');
select * from v$resource_limit;


--------maximum amount of memory allocated by the currently connected sessions
SELECT SUM (value/1024/1024) "max memory allocation" FROM v$sesstat ss, v$statname st WHERE st.name = 'session uga memory max' AND ss.statistic# = st.statistic#;


---------------------------Used SGA-----------------
select sum(inuse) from (
select name, round(sum(mb),1) mb, round(sum(inuse),1) inuse
from (select case when name = 'buffer_cache'
then 'db_cache_size'
when name = 'log_buffer'
then 'log_buffer'
else pool
end name,
bytes/1024/1024 mb,
case when name <> 'free memory'
then bytes/1024/1024
end inuse
from v$sgastat
)group by name );


-------------------pga requirement------------

select
(select highwater from dba_high_water_mark_statistics where name = ('SESSIONS'))*(2048576+a.value+b.value) pga_size
from
v$parameter a,
v$parameter b
where
a.name = 'sort_area_size'
and
b.name = 'hash_area_size'
;
-------------------------------------
http://docs.oracle.com/cd/B28359_01/server.111/b28314/tdpdw_system.htm

http://www.wdpi.com/product/used-hp/proliant-servers/ml570
http://h18004.www1.hp.com/products/quickspecs/12474_na/12474_na.html
http://en.wikipedia.org/wiki/Intel_QuickPath_Interconnect
http://www.intel.com/content/www/us/en/io/quickpath-technology/quickpath-technology-general.html
http://www.dfisica.ubi.pt/~hgil/utils/Hyper-Threading.4_Turbo.Boost.html
http://www.intel.com/content/www/us/en/architecture-and-technology/hyper-threading/hyper-threading-technology.html
http://h18000.www1.hp.com/products/quickspecs/13669_na/13669_na.html
http://www.cpubenchmark.net/multi_cpu.html

http://www.dbspecialists.com/files/presentations/mts_case_study.html

Configuration for LD DB:
2 Quad core processor 64 BIT (upgradable to 4 processor)
16 or 32 GB RAM
14 * 200 GB HDD (SAN)
Operating system : Win 2003 enterprise edition 64 bit

HP ProLiant ML570

Intel® Dual-Core 64-bit Xeon® processor 7000 sequence
Processor-4
3.00 GHz, 800MHz FSB
32 or 64 GB RAM

----------------------------------------------------------------------Eight Core Processor
Intel® Xeon® Processor E7-8837 product Family Xeon E7-8800
(24M Cache, 2.66 GHz, 6.40 GT/s Intel® QPI)
Thermal Design Power 130W (refers to the maximum amount of power the cooling system in a computer is required to dissipate.)


clock speed 2.66GHz
QPI (QuickPath Interconnect) is a point-to-point processor interconnect developed by Intel which replaces the Front Side Bus (FSB) in Xeon, Itanium, and certain desktop platforms.
Processor-2
Processor Core: Octa-core (8 Core) / Quad-core (4 core)

32 or 64 GB RAM

---------------------------
64-bit Intel® Xeon® Processor 24M Cache, 2.66 GHz, 6.40 GT/s Intel® QPI
with intel turbo boost technology / hyper threading technology

Most Used Processor: Intel® Xeon® Processor E7-8837 @ 2.66 GHz

Processor- 2 Quad-core (4 core)
32 or 64 GB RAM

-------------------------------------------------------------------Quad-Core Processors-----------------------

Intel® Xeon® E7520 (1.86GHz/4-core/18MB/95W) Processor
Memory 16 or 32 GB
Storage 8TB



--------------------------------------


The key to this dramatic claim is a feature called Turbo Boost technology. Basically, if the current application workload isn't keeping all four cores fully busy and pushing right up against the chip's TDP (Thermal Design Power) limit, Turbo Boost can increase the clock speed of each core individually to get more performance out of the chip.

---------------------------------------

Ten-Core Processors
Intel® Xeon® E7-4870 (2.40GHz/10-core/30MB/130W) Processor
Intel® Xeon® E7-4860 (2.26GHz/10-core/24MB/130W) Processor
Intel® Xeon® E7-4850 (2.00GHz/10-core/24MB/130W) Processor
Intel® Xeon® E7-8867L (2.13GHz/10-core/30MB/105W) Processor
Eight-Core Processors
Intel® Xeon® E7-8837 (2.67GHz/8-core/24MB/130W) Processor
Intel® Xeon® E7-4830 (2.13GHz/8-core/24MB/105W) Processor
Intel® Xeon® E7-4820 (2.0GHz/8-core/18MB/105W) Processor
Intel® Xeon® X7560 (2.26GHz/8-core/24MB/130W) Processor
Intel® Xeon® X7550 (2.0GHz/8-core/18MB/130W) Processor
Intel® Xeon® L7555 (1.86GHz/8-core/24MB/95W) Processor
Six-Core Processors
Intel® Xeon® E7-4807 (1.86GHz/6-core/18MB/95W) Processor
Intel® Xeon® E7540 (2.0GHz/6-core/18MB/105W) Processor
Intel® Xeon® E7530 (1.86GHz/6-core/12MB/105W) Processor
Intel® Xeon® X7542 (2.66GHz/6-core/18MB/130W) Processor
Quad-Core Processors
Intel® Xeon® E7520 (1.86GHz/4-core/18MB/95W) Processor

NOTE: New Intel Microarchitecture with Intel Virtualization Technology FlexMigration. Industry Standard Intel® 7500 Chipset with four high-speed interconnects up to 6.4GT/s.


Hardware Requirement

Minimum Hardware Requirements
On small instances, server load is primarily driven by peak visitors.

5 Concurrent Users

2GHz+ CPU
512MB RAM
5GB database space
25 Concurrent Users

Quad 2GHz+ CPU
2GB+ RAM
10GB database space
----------------------------------------------------------RAM---------

memory (GB) = 2 * (number of CPUs)

-------------------------------------Disk-----------

Use the following formula to determine the number of disk arrays you need:

number of disk arrays = (required throughput in MB/s) / (throughput per disk array in MB/s)

For example, a system with 1200 MB per second throughput requires at least 1200 / 180 = 7 disk arrays.

Ensure you have enough physical disks to sustain the throughput you require. Ask your disk vendor for the throughput numbers of the disks.

---------------------------
PGA_AGGREGATE_TARGET = 3 * SGA_TARGET.
----------------



-------------------Measure the Cost of Each Operation---------------

Cost per request. You can calculate the cost in terms of processor cycles required for processing a request by using the following formula:
Cost (Mcycles/request) = ((number of processors x processor speed) x processor use) / number of requests per second

For example, using the values identified for the performance counters in Step 2, where processor speed is 1.3 GHz or 1300 Mcycles/sec, processor usage is 90 percent, and Requests/Sec is 441, you can calculate the page cost as:

((2 x 1,300 Mcycles/sec) x 0.90) / (441 Requests/Sec) = 5.30 Mcycles/request

Cost per operation. You can calculate the cost for each operation by using the following formula:
Cost per operation = (number of Mcycles/request) x number of pages for an operation

The cost of the Login operation is:

5.30 x 3 = 15.9 Mcycles


---------------Calculate the Cost of an Average User Profile

Average cost of profile in Mcycles/sec = Total cost for a profile / session length in seconds
Thus, the average cost for the profile is:

147.52/600 = 0.245 Mcycles/sec


---------------------------------Calculate Site Capacity


To calculate these values, use the following formulas:

Simultaneous users with a given profile that your application can currently support. After you determine the cost of the average user profile, you can calculate how many simultaneous users with a given profile your application can support given a certain CPU configuration. The formula is as follows:
Maximum number of simultaneous users with a given profile = (number of CPUs) x (CPU speed in Mcycles/sec) x (maximum CPU utilization) / (cost of user profile in Mcycles/sec)

Therefore, the maximum number of simultaneous users with a given profile that the sample application can support is:

(2 x 1300 x 0.75)/0.245 = 7,959 users

Future resource estimates for your site. Calculate the scalability requirements for the finite resources that need to be scaled up as the number of users visiting the site increases. Prepare a chart that gives you the resource estimates as the number of users increases.
Based on the formulas used earlier, you can calculate the number of CPUs required for a given number of users as follows:

Number of CPUs = (Number of users) x (Total cost of user profile in Mcycles/sec) / ((CPU speed in Mcycles/sec) x (Maximum CPU utilization))

If you want to plan for 10,000 users for the sample application and have a threshold limit of 75 percent defined for the processor, the number of CPUs required is:

10000 x 0.245 / (1.3 x 1000 x 0.75) = 2,450 / 975 = 2.51 processors

Your resource estimates should also factor in the impact of possible code changes or functionality additions in future versions of the application. These versions may require more resources than estimated for the current version.



-------------------------------------------------------------------------------
Assessing Your Application Performance Objectives
At this stage in capacity planning, you gather information about the level of activity expected on your server, the anticipated number of users, the number of requests, acceptable response time, and preferred hardware configuration. Capacity planning for server hardware should focus on maximum performance requirements and set measurable objectives for capacity.
For your application, take the information that you derive from Examining Results from the Baseline Applications, to see how your application differs from one of the baseline applications. For example, if you are using the HTTPS protocol for a business application similar to MedRec, you should examine the metrics provided for the heavy MedRec application. Perform the same logical process for all of the factors listed in Capacity Planning Factors.
The numbers that you calculate from using one of our sample applications are of course just a rough approximation of what you may see with your application. There is no substitute for benchmarking with the actual production application using production hardware. In particular, your application may reveal subtle contention or other issues not captured by our test applications.


Calculating Hardware Requirements
To calculate hardware capacity requirements:
Evaluate the complexity of your application, comparing it to one or more of the applications described in Examining Results from the Baseline Applications. The example in Guidelines for Calculating Hardware Requirements identifies this value as the Complexity Factor. If your application is about as complex as one of the baselines, your Complexity Factor = 1.
Consider what throughput is required for your application. In the example, this is called the Required TPS (transactions per second).
Take the preferred hardware TPS value from the appropriate table. The example in Guidelines for Calculating Hardware Requirements identifies this value as the Reference TPS.



Guidelines for Calculating Hardware Requirements
The number of computers required is calculated as follows:
Number of boxes = (Required TPS) / (Reference TPS / Complexity Factor)
For example, if your assessment shows:
Your application is twice as complex as the Light MedRec application; the Complexity Factor = 2.
The requirement is for 400 TPS; the Required TPS = 400.
The preferred hardware configuration is Windows 2000 using 4x700 MHz processors.
The Reference TPS is 205, from Table 2-3, configuration number lmW1.
The number of boxes required is approximately equal to:
400/(205/2) = 400/102.5 = next whole number, 3.90 rounded up = 4 boxes.
Always test the capacity of your system before relying on it for production deployments.
-----------------------


For a data-warehouse project, hard disk performance is everything. Database cache, indexes, execution plans, memory, and number of processors will make no difference if your server's hard disks are slow.

For example, suppose you have a table with 10 GB of data and no indexes. Running SELECT COUNT(*) against the table requires a full scan. If your hard disk reads 100 MB per second:

10 GB / 100 MB/s = 10,000 / 100 = 100 sec

Is 100 sec acceptable for you?

---------------------

Disk Size depends on database size

disk speed depends on TPS (transactions per second)
--------------
RAM depends on sort operations, merge joins, ... number of concurrent sessions ...
The Sort transform is a fully blocking operation. Whatever rows you ask the Sort to work on are blocked in the data flow until the sort completes. The Sort is significantly slower if it can't fit all that data into RAM, so that's a critical limit you should design for. Take the maximum number of rows you expect into a Sort multiplied by the row length.

The Merge Join is a partially blocking operation. You don't have to be as concerned about this as with the Sort, but if your join is particularly "malformed", it could require a lot of RAM to buffer one of the inputs while the other waits for rows.

--------------------------------
number of processors and speed depends on your data processing

----------------------

Factors Affecting Capacity Planning
There are various factors to consider when conducting a capacity-planning exercise. Each of the following factors has a significant impact on system performance (and on system capacity as well).
- Operational load at backend
- Front end load
- Number of concurrent users/requests
- Base load and peak load
- Number of processes and Instances of processes
- Log size
- Archival requirements
- Persistence requirements
- Base recommendations from vendor
- Installation requirements
- Test results and extrapolation
- Interface Architecture and Performance Tuning
- Processing requirements and I/O operations
- Network bandwidth and latency
- Architecture resilience
- Network/Transmission losses
- Load factor loss
- Legacy interfacing loss/overheads
- Complexity of events and mapping
- Factor of safety

--------------

Hardware capacity determination
The hardware requirements can be evaluated based on test results for a given set of conditions. Several tools are available to simulate clients (LoadRunner, WebLOAD, etc.). By simulating the transaction mix, client load can be generated, and load can be increased by adding more concurrent users. This is an iterative process, and the goal is to achieve as high a CPU utilization as possible. If CPU utilization doesn't increase (and hasn't yet peaked) with the addition of more users, database or application bottlenecks are analyzed. Several commercially available profilers (IntroScope, OptimizeIt, and JProbe) can be used to identify these hot spots. In a finely tuned system, the CPU utilization (at steady state) in the ideal case is usually less than 70%. Throughput won't increase with the addition of more load, but response times will increase as more clients are added. The capacity of the hardware is the point where response time starts to increase for additional load.






Database Growth Monitoring

Step : 1 Calculate the total size of all tablespaces

select sum(bytes)/1024/1024 "TOTAL SIZE (MB)" from dba_Data_files;


Step : 2 Calculate Free Space in Tablespace

select sum(bytes)/1024/1024 "FREE SPACE (MB)" from dba_free_space;

Step : 3 Calculate total size , free space and used space in tablespace

select t2.total "TOTAL DISK USAGE",t1.free "FREE SPACE",(t1.free/t2.total)*100 "FREE (%)",(t2.total-t1.free) "USED SPACE", (1-t1.free/t2.total)*100 "USED (%)"
from (select sum(bytes)/1024/1024 free from dba_free_space) t1 , (select sum(bytes)/1024/1024 total from dba_Data_files) t2 ;


Step : 4 Create a table to store all the free/used-space information for the tablespaces

create table db_growth
as select *
from (
select sysdate measure_date, -- expressions must be aliased in a CTAS; the column name here is arbitrary
t2.total "TOTAL_DISK_USAGE",t1.free "FREE_SPACE",(t2.total-t1.free) "USED_SPACE",(t1.free/t2.total)*100 "FREE%"
from
(select sum(bytes)/1024/1024 free
from dba_free_space) t1 ,
(select sum(bytes)/1024/1024 total
from dba_Data_files) t2
);

Step : 5 Insert free space information in DB_GROWTH table (if you want to populate data Manually)

insert into db_growth
select *
from (
select sysdate,t2.total "TOTAL_SIZE",t1.free "FREE_SPACE",(t2.total-t1.free) "USED_SPACE",(t1.free/t2.total)*100 "FREE%"
from
(select sum(bytes)/1024/1024 free
from dba_free_space) t1 ,
(select sum(bytes)/1024/1024 total
from dba_Data_files) t2
);

COMMIT;


Step : 6 Create a view on the DB_GROWTH base table (this step is required if you want to populate the data automatically)


create view v_db_growth
as select *
from
(
select sysdate measure_date, -- expressions must be aliased in a view as well
t2.total "TOTAL_SIZE",t1.free "FREE_SPACE",(t2.total-t1.free) "USED_SPACE",(t1.free/t2.total)*100 "FREE%"
from
(select sum(bytes)/1024/1024 free
from dba_free_space) t1 ,
(select sum(bytes)/1024/1024 total
from dba_Data_files) t2
)
;

Step : 7 Insert data into the DB_GROWTH table from the V_DB_GROWTH view


insert into db_growth select *
from v_db_growth;
COMMIT;


Step : 8 Check that everything went fine.

select * from db_growth;

Check Result

Step : 9 Execute the following SQL for more timestamp information

alter session set nls_date_format ='dd-mon-yyyy hh24:mi:ss' ;
Session altered.

Step : 10 Create a DBMS job which executes every 24 hours

declare
jobno number;
begin
dbms_job.submit(
jobno, 'begin insert into db_growth select * from v_db_growth;commit;end;', sysdate, 'SYSDATE+ 1', TRUE);
commit;
end;
/


PL/SQL procedure successfully completed.
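
On 10g and later, the same daily snapshot can be scheduled with DBMS_SCHEDULER instead of DBMS_JOB (as used elsewhere in these notes); a sketch, using a hypothetical job name DB_GROWTH_JOB:

BEGIN
DBMS_SCHEDULER.create_job (
job_name => 'db_growth_job', -- hypothetical name
job_type => 'PLSQL_BLOCK',
job_action => 'begin insert into db_growth select * from v_db_growth; commit; end;',
start_date => SYSTIMESTAMP,
repeat_interval => 'freq=DAILY',
enabled => TRUE,
comments => 'Daily DB growth snapshot');
END;
/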

Step: 11 View your DBMS jobs and their other information

select * from user_jobs;


-----If you want to execute the DBMS job manually, run the following command; otherwise the job executes automatically

exec dbms_job.run(ENTER_JOB_NUMBER)
exec dbms_job.run(23);



PL/SQL procedure successfully completed.

exec dbms_job.remove(21); ------to remove a job


Step: 12 Finally, all data is populated in the db_growth table

select * from db_growth;

Index Clustering Factor

The clustering_factor measures how synchronized an index is with the data in a table. A table with a high clustering factor is out-of-sequence with the rows, and large index range scans will consume lots of I/O. Conversely, an index with a low clustering_factor is closely aligned with the table: related rows reside together in each data block, making the index very desirable for optimal access.

Oracle provides a column called clustering_factor in the dba_indexes view that reports how well the table rows are synchronized with the index. The table rows are synchronized with the index when the clustering factor is close to the number of data blocks; the column values are not row-ordered when the clustering_factor approaches the number of rows in the table.


select a.index_name, a.num_rows, a.clustering_factor, b.blocks,b.avg_row_len from user_indexes a, user_tables b
where a.num_rows !=0 and a.table_name = b.table_name order by 2 desc,1 desc;


Un-Clustered Table Rows
clustering_factor ~= num_rows

Clustered Table Rows
clustering_factor ~= blocks
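
Building on the query and the two rules above, a sketch that labels each index by whichever bound its clustering factor is closer to:

select a.index_name, a.clustering_factor, a.num_rows, b.blocks,
case when abs(a.clustering_factor - b.blocks) <= abs(a.clustering_factor - a.num_rows)
then 'clustered (good CF)'
else 'un-clustered (bad CF)'
end assessment
from user_indexes a, user_tables b
where a.table_name = b.table_name and a.num_rows > 0
order by a.clustering_factor desc;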

------------------------------------------------------------------------------------------------------------------------------------------------
- A good CF is equal (or near) to the number of blocks in the table.

- A bad CF is equal (or near) to the number of rows in the table.

- Rebuilding the index cannot improve the CF.

Then how do you improve the CF?

- To improve the CF, it’s the table that must be rebuilt (and reordered).
- If the table has multiple indexes, careful consideration needs to be given to which index to order the table by.

------------------------------------------------------------------------------------------------------------------------------------------------
Four factors work together to help the CBO decide whether to use an index or a full-table scan: the selectivity of a column value, the db_block_size, the avg_row_len, and the cardinality. An index scan is usually faster if a data column has high selectivity and a low clustering_factor.


When a column has high selectivity but a high clustering_factor and a small avg_row_len, the column values are randomly distributed in the table, and additional I/O is required to fetch the rows. In that case an index range scan causes a huge amount of unnecessary I/O, making a full-table scan more efficient.

---------------------------------------Calculating the Clustering Factor

To calculate the clustering factor of an index during the gathering of index statistics, Oracle does the following.

For each entry in the index Oracle compares the entry's table rowid block with the block of the previous index entry.

If the block is different, Oracle increments the clustering factor by 1.

If the clustering factor is close to the number of entries in the index, then an index range scan of 1000 index entries may require nearly 1000 blocks to be read from the table.

If the clustering factor is close to the number of blocks in the table, then an index range scan of 1000 index entries may require only 50 blocks to be read from the table.

CTAS (create table as select)

An index hint is the best solution (see the timings below).



----------------------------------CTAS with ORDER BY

create table transactions14 as select * from transactions;
50 SEC

create table transactions15 as select * from transactions ORDER BY FIRMNUMBER,TRANSACTION,SUBTRANS;
90 SEC



--------------------------------------------Parallel CTAS

create table transactions16 parallel (degree 2) as select * from transactions ORDER BY FIRMNUMBER,TRANSACTION,SUBTRANS;
120 SEC


create table transactions17 parallel (degree 2) as select * from transactions;
40 SEC



create table transactions18 parallel (degree 4) as select * from transactions;

50 SEC

create table transactions20 parallel (degree 8) as select * from transactions;

55 SEC



------------------------------------CTAS using INDEX hint---

SELECT * FROM dba_ind_columns WHERE table_name='TRANSACTIONS';

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions;
8 sec


create table transactions23 as select /*+ index(FIRMNUMBER) */ * from transactions;
8 sec


----------------------CTAS WITH PRIMARY KEY

create table transactions24 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions;

ALTER TABLE transactions24 ADD constraint pk_SAUDA23 PRIMARY KEY(FIRMNUMBER,TRANSACTION,SUBTRANS)


-----------------------------------------------------------------------
create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions where 1=2;

insert into transactions22 (select * from transactions);
30 sec

insert into transactions22 (select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions);
30sec


insert /*+ parallel(transactions22,2) */ into transactions22 (select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions);
60sec


-----------------------------------------------------------------------

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions where 1=2;

CREATE UNIQUE INDEX "LDBO"."PK_SAUDA1" ON "LDBO"."TRANSACTIONS22" ("FIRMNUMBER", "TRANSACTION", "SUBTRANS") PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS STORAGE(INITIAL 1610612736 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645 PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT) TABLESPACE "INDX" ;

analyze table transactions22 compute STATISTICS;

analyze index PK_SAUDA1 compute STATISTICS;


---------------------------------------------no append

insert into dest select * from source1;
189SEC


---------------------------------------------append
insert /*+ append */ into dest select * from source1;

----------------------------------------CTAS, no parallel--------------

insert /*+ append */ into dest select * from source1;


create table dest as select * from source1;

----------------------------------------CTAS, parallel--------------

alter session force parallel ddl parallel 3;
alter session force parallel query parallel 3;

create table transactions22 as select * from transactions;

40SEC

----------------------------------------CTAS, parallel WITH INDEX--------------

alter session force parallel ddl parallel 3;
alter session force parallel query parallel 3;

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from transactions;


----------------------------------GOOD
CTAS INDEX > CTAS PARALLEL DDL > APPEND

---------------------------------------------------------------------------------------------------

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink where 1=2;

insert into transactions22 (select * from ldbo.transactions@cmldlink);

20 min
---------------------------------------------------

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink where 1=2;

insert into transactions22 (select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink);

2 min 10 sec
-----------------------------------

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink where 1=2;

insert /*+ parallel(transactions22) */ into transactions22 (select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink);

2 min 10 sec

-------------------------------------

create table transactions23 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink;

60 sec


-------------------------------------------------------

create table transactions23 as select /*+ index(TRANSACTIONS PK_SAUDAPRIMARY) */ * from ldbo.transactions@cmldlink;
60 SEC

----------------------

create table transactions23 as select /*+ index(TRANSACTIONS PK_SAUDAPRIMARY,IDXCLIENTSAUDA,IDXCLIENTBRSAUDA) */ * from ldbo.transactions@cmldlink;

10 MIN

--------------------------------------------------------

create table transactions24 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink where 1=2;

insert /*+ append */ into transactions24 select * from transactions23;

40 sec

--------------------------------------------------

alter session force parallel ddl parallel 4;
alter session force parallel query parallel 4;

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink where 1=2;

insert into transactions22 (select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink);

2min


---------------------------------

alter session force parallel ddl parallel 2;
alter session force parallel query parallel 2;

create table transactions22 as select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink where 1=2;

insert into transactions22 (select /*+ index(FIRMNUMBER,TRANSACTION,SUBTRANS) */ * from ldbo.transactions@cmldlink);

2.5 min

Get DDL


GET_DEPENDENT_DDL(object_type, base_object_name, base_object_schema, version, model, transform, object_count)

GET_GRANTED_DDL(object_type, grantee, version, model, transform, object_count)


----------------------------------------------------------------------------------------------------

select DBMS_METADATA.GET_DDL('TABLE','ACCOUNTS')||'/' from dual;
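
The generated DDL carries full storage clauses by default. To trim it and add terminating semicolons, set session-level transform parameters before calling GET_DDL:

begin
dbms_metadata.set_transform_param(dbms_metadata.session_transform, 'STORAGE', false);
dbms_metadata.set_transform_param(dbms_metadata.session_transform, 'SQLTERMINATOR', true);
end;
/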

----------GET_DEPENDENT_DDL
select DBMS_METADATA.GET_DEPENDENT_DDL('INDEX','ACCOUNTS') aa from dual;

select DBMS_METADATA.GET_DEPENDENT_DDL('TRIGGER','ACCOUNTS') aa from dual;

select DBMS_METADATA.GET_DEPENDENT_DDL('OBJECT_GRANT','ACCOUNTS') aa from dual;

SELECT  DBMS_METADATA.GET_DEPENDENT_DDL('CONSTRAINT','ACCOUNTS') from dual;

SELECT  DBMS_METADATA.GET_DEPENDENT_DDL('REF_CONSTRAINT','ACCOUNTS') from dual;


--------------------------------
select DBMS_METADATA.GET_GRANTED_DDL('SYSTEM_GRANT','<schema>') from dual;

select DBMS_METADATA.GET_GRANTED_DDL('ROLE_GRANT','<schema>') from dual;

select DBMS_METADATA.GET_GRANTED_DDL('OBJECT_GRANT','<schema>') from dual;

select DBMS_METADATA.GET_GRANTED_DDL('OBJECT_GRANT','KSH') aa from dual;

-----------------------------------------------------------------------------------------------------------------------------
SET LONG 1000000

select dbms_metadata.get_ddl( 'USER', 'LDBO' ) from dual
UNION ALL
select dbms_metadata.get_granted_ddl( 'SYSTEM_GRANT', 'LDBO' ) from dual
UNION ALL
select dbms_metadata.get_granted_ddl( 'OBJECT_GRANT', 'LDBO' ) from dual
UNION ALL
select dbms_metadata.get_granted_ddl( 'ROLE_GRANT', 'LDBO' ) from dual
UNION ALL
select dbms_metadata.get_granted_ddl( 'TABLESPACE_QUOTA', 'LDBO' ) from dual;


-----------------------------------------------------------------------------------------------------------------------------
CREATE TABLE my_ddl (owner VARCHAR2(30),
                     table_name VARCHAR2(30),
                     ddl   CLOB);
INSERT INTO my_ddl (owner, table_name, ddl)
SELECT owner, table_name,
DBMS_METADATA.GET_DDL('TABLE', table_name, owner) ddl
 FROM DBA_TABLES WHERE OWNER = 'LDBO';

Table Actual Size

SELECT
owner, table_name, TRUNC(sum(bytes)/1024/1024) Meg
FROM
(SELECT segment_name table_name, owner, bytes
FROM dba_segments
WHERE segment_type = 'TABLE'
UNION ALL
SELECT i.table_name, i.owner, s.bytes
FROM dba_indexes i, dba_segments s
WHERE s.segment_name = i.index_name
AND s.owner = i.owner
AND s.segment_type = 'INDEX'
UNION ALL
SELECT l.table_name, l.owner, s.bytes
FROM dba_lobs l, dba_segments s
WHERE s.segment_name = l.segment_name
AND s.owner = l.owner
AND s.segment_type = 'LOBSEGMENT'
UNION ALL
SELECT l.table_name, l.owner, s.bytes
FROM dba_lobs l, dba_segments s
WHERE s.segment_name = l.index_name
AND s.owner = l.owner
AND s.segment_type = 'LOBINDEX')
WHERE owner ='LDBO'
GROUP BY table_name, owner
HAVING SUM(bytes)/1024/1024 > 10 /* Ignore really small tables */
ORDER BY SUM(bytes) desc
;

Schedule Job for Exe file

BEGIN
dbms_scheduler.create_job(
job_name => 'del_archive',
job_type => 'EXECUTABLE',
job_action => 'd:\ld\oracle\del.bat',
start_date => '14-MAR-12 04.52.00.00 PM ASIA/CALCUTTA',
repeat_interval => 'freq=DAILY',
enabled => TRUE,
comments => 'delete old archivelogs');
END;
/



exec DBMS_SCHEDULER.run_job ('del_archive');


BEGIN
DBMS_SCHEDULER.drop_JOB (job_name => 'del_archive');
END;
/

Role Recreation

set heading off verify off feedback off echo off term off linesize 200 wrap on

spool c:\temp\roles_creation.sql

SELECT 'Create Role '|| ROLE ||' ;' from dba_roles;

SELECT 'Grant '|| PRIVILEGE || ' to ' || GRANTEE || ';' FROM DBA_SYS_PRIVS where grantee not in ('SYS','SYSTEM','SYSMAN','TSMSYS','WMSYS','RECOVERY_CATALOG_OWNER','RESOURCE','OUTLN','ORACLE_OCM','OEM_MONITOR','OEM_ADVISOR','MGMT_USER','IMP_FULL_DATABASE','EXP_FULL_DATABASE','DBA','CONNECT','AQ_ADMINISTRATOR_ROLE','DBSNMP','SCHEDULER_ADMIN');

SELECT 'Grant '|| PRIVILEGE ||' on '|| TABLE_NAME || ' to ' || GRANTEE || ';' from dba_tab_privs Where Grantor='LDBO';

SELECT 'Grant update('|| COLUMN_NAME ||') on '|| TABLE_NAME || ' to ' || GRANTEE || ';' from dba_col_privs Where Grantor='LDBO';

spool off


Shrink Datafile Suggestion

select bytes/1024/1024 real_size,ceil( (nvl(hwm,1)*16384)/1024/1024 ) shrinked_size,
bytes/1024/1024-ceil( (nvl(hwm,1)*16384)/1024/1024 ) released_size
,'alter database datafile '|| ''''||file_name||'''' || ' resize ' || ceil( (nvl(hwm,1)*16384)/1024/1024 ) || ' m;' cmd
from
dba_data_files a,
( select file_id, max(block_id+blocks-1) hwm from dba_extents group by file_id ) b
where
tablespace_name='INDX'
and
a.file_id = b.file_id(+)
and ceil(blocks*16384/1024/1024)- ceil((nvl(hwm,1)* 16384)/1024/1024 ) > 0;
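
The 16384 literal above assumes a 16 KB block size. A variant that reads the block size from dba_tablespaces instead (a sketch; same INDX tablespace):

select 'alter database datafile ''' || a.file_name || ''' resize ' ||
ceil((nvl(b.hwm,1) * t.block_size)/1024/1024) || 'm;' cmd
from dba_data_files a
join dba_tablespaces t on t.tablespace_name = a.tablespace_name
left join (select file_id, max(block_id + blocks - 1) hwm
from dba_extents group by file_id) b on b.file_id = a.file_id
where a.tablespace_name = 'INDX'
and ceil((a.blocks * t.block_size)/1024/1024) - ceil((nvl(b.hwm,1) * t.block_size)/1024/1024) > 0;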

Wednesday, March 14, 2012

ORA-03297 file contains used data beyond requested RESIZE value

select
a.file_name,
a.bytes file_size_in_bytes,
(c.block_id+(c.blocks-1)) * &_BLOCK_SIZE HWM_BYTES,
a.bytes - ((c.block_id+(c.blocks-1)) * &_BLOCK_SIZE) SAVING
from dba_data_files a,
(select file_id,max(block_id) maximum
from dba_extents
group by file_id) b,
dba_extents c
where a.file_id = b.file_id
and c.file_id = b.file_id
and c.block_id = b.maximum
and c.tablespace_name = 'INDX';

ALTER DATABASE DATAFILE 'D:\lard1213\INDEX01.ORA' RESIZE 20000M;

Friday, February 17, 2012

ORA-00997: illegal use of LONG datatype (Migration Data LOB column)

SQL> CREATE GLOBAL TEMPORARY TABLE TMP_scan ON COMMIT PRESERVE ROWS as select FIRMNUMBER,CODE,PSCAN
NEDIMAGE,NFINANCIALYEAR from ldbo.CLIENTSCANNEDIMAGE@cmldlink;
CREATE GLOBAL TEMPORARY TABLE TMP_scan ON COMMIT PRESERVE ROWS as select FIRMNUMBER,CODE,PSCANNEDIMA
*
ERROR at line 1:
ORA-00997: illegal use of LONG datatype


SQL> create table CLIENTSCANNEDIMAGE as SELECT /*+ index(FIRMNUMBER,NFINANCIALYEAR,CODE) */ * from
ldbo.CLIENTSCANNEDIMAGE@cmldlink where 1=0;
create table CLIENTSCANNEDIMAGE as SELECT /*+ index(FIRMNUMBER,NFINANCIALYEAR,CODE) */ * from ldbo.
*
ERROR at line 1:
ORA-00997: illegal use of LONG datatype


SQL>
SQL> INSERT INTO CLIENTSCANNEDIMAGE SELECT /*+ index(FIRMNUMBER,NFINANCIALYEAR,CODE) */ FIRMNUMBER,
CODE,PSCANNEDIMAGE,NFINANCIALYEAR from ldbo.CLIENTSCANNEDIMAGE@cmldlink;
INSERT INTO CLIENTSCANNEDIMAGE SELECT /*+ index(FIRMNUMBER,NFINANCIALYEAR,CODE) */ FIRMNUMBER,CODE,
ERROR at line 1:
ORA-00997: illegal use of LONG datatype

SQL> DECLARE
2 CURSOR c IS
3 select FIRMNUMBER, CODE, PSCANNEDIMAGE, NFINANCIALYEAR from ldbo.CLIENTSCANNEDIMAGE@cmldlink;
4 rc c%ROWTYPE;
5 BEGIN
6 OPEN c;
7 LOOP
8 FETCH c INTO rc;
9 EXIT WHEN c%NOTFOUND;
10 INSERT INTO CLIENTSCANNEDIMAGE
11 ( FIRMNUMBER, CODE, PSCANNEDIMAGE, NFINANCIALYEAR )
12 VALUES ( rc.FIRMNUMBER, rc.CODE, rc.PSCANNEDIMAGE, rc.NFINANCIALYEAR );
13 END LOOP;
14 COMMIT;
15 END;
16 /
DECLARE
*
ERROR at line 1:
ORA-01406: fetched column value was truncated


---------------------Solution----------------------
At the SQL*Plus prompt:
Use the COPY command; remember to press Enter after the hyphen (-), SQL*Plus's line-continuation character.
copy from ldbo/ldbo@nbs1112srv -
create CLIENTSCANNEDIMAGE2 using select * from CLIENTSCANNEDIMAGE;

--------------------or------------------
CREATE TABLE "DPCDSL"."CLIENTSCANNEDIMAGE1"
(
"FIRMNUMBER" CHAR(10 BYTE) NOT NULL ENABLE,
"CODE" CHAR(10 BYTE) NOT NULL ENABLE,
"PSCANNEDIMAGE" LONG RAW,
"NFINANCIALYEAR" NUMBER(4,0) NOT NULL ENABLE
)
PCTFREE 15 PCTUSED 40 INITRANS 1 MAXTRANS 255 NOCOMPRESS LOGGING STORAGE
(
INITIAL 10485760 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645 PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT
)
TABLESPACE "USR" ;


copy from ldbo/ldbo@nbs1112srv -
insert CLIENTSCANNEDIMAGE1 using select * from CLIENTSCANNEDIMAGE;


----------



Thursday, February 16, 2012

Job schedule compile invalid objects

REQUIRES SYS PRIVILEGES TO EXECUTE UTL_RECOMP

BEGIN
DBMS_SCHEDULER.create_job (
job_name => 'compile_invalid',
job_type => 'PLSQL_BLOCK',
job_action => 'BEGIN UTL_RECOMP.recomp_serial(''LDBO''); END;',
start_date => '08-FEB-12 11:35.00.00 PM ASIA/CALCUTTA',
repeat_interval => 'freq=DAILY',
end_date => NULL,
enabled => TRUE,
comments => 'JOB to compile invalid objects');
END;
/



BEGIN
DBMS_SCHEDULER.drop_JOB (job_name => 'compile_invalid');
END;
/

exec DBMS_SCHEDULER.run_job ('compile_invalid');


select * from dba_scheduler_jobs;
select job_name,job_action,start_date,repeat_interval,end_date,run_count,failure_count from dba_scheduler_jobs where job_name='ANALYZE';

SELECT * FROM dba_scheduler_running_jobs;

Tuesday, February 14, 2012

ORA-00600: internal error code, arguments: [4193], [5396], [2242]

ORA-00607: Internal error occurred while making a change to a data block
ORA-00600: internal error code, arguments: [4193], [5396], [2242], [], [], [],[], []

Rename the spfile so the instance starts from the pfile:

shut immediate
startup

CREATE UNDO TABLESPACE UNDOTBS2 DATAFILE 'F:\NBSD1112\UNDOTBS03.ORA' SIZE 500M REUSE AUTOEXTEND ON;

ALTER SYSTEM SET UNDO_TABLESPACE=UNDOTBS2;

shut immediate

change undo_tablespace=UNDOTBS2 into parameter file

startup

DROP TABLESPACE UNDOTBS1 INCLUDING CONTENTS AND DATAFILES;

CREATE UNDO TABLESPACE UNDOTBS1 DATAFILE 'F:\NBSD1112\UNDOTBS01.ORA' SIZE 500M REUSE AUTOEXTEND ON;

ALTER SYSTEM SET UNDO_TABLESPACE=UNDOTBS1;

shut immediate

change undo_tablespace=UNDOTBS1 into parameter file

startup

DROP TABLESPACE UNDOTBS2 INCLUDING CONTENTS AND DATAFILES;

Monday, February 13, 2012

Server Capacity Planning

1) Existing server configuration (Processor, No of CPU, RAM, Disk Capacity, … , …)
2) No. of running databases on server
3) Databases folder size of all years
4) No of Users ( concurrent connections) in the database
5) Weekly or monthly growth of databases.
6) Oracle core license??

----------------------------Memory----------------
select * from dba_high_water_mark_statistics where name in ('SESSIONS','DB_SIZE');
select * from v$resource_limit;
--------maximum amount of memory allocated by the currently connected sessions
SELECT SUM (value) "max memory allocation" FROM v$sesstat ss, v$statname st WHERE st.name = 'session uga memory max' AND ss.statistic# = st.statistic#;

------------------pga requirement------------

select
(select highwater from dba_high_water_mark_statistics where name = 'SESSIONS')*(2048576+a.value+b.value) pga_size
from
v$parameter a,
v$parameter b
where
a.name = 'sort_area_size'
and
b.name = 'hash_area_size'
;


-----------------------CPU Benchmark------------------------------
http://www.cpubenchmark.net/multi_cpu.html

-----------------------Space Management---------------

Plan according to weekly/monthly database growth and the number of years you are planning for.

SGA PGA measuring

select * from v$sgastat order by 1;
select * from v$pgastat order by 1;


I noticed that you have some pools in your SGA which are not used:

large pool free memory 209715200

But your PGA could reach about 340 MB.

So you may decrease the large_pool_size parameter by about 160 MB (you have 200 MB free).

That will decrease the SGA by about 160 MB.

Then you may increase PGA_AGGREGATE_TARGET to 512 MB.

The most important thing is that SGA + PGA remain below 2 GB (except if you use the /3GB switch, which may help you get 1 GB more).

------------------------Used SGA-----------------

select name, round(sum(mb),1) mb, round(sum(inuse),1) inuse
from (select case when name = 'buffer_cache'
then 'db_cache_size'
when name = 'log_buffer'
then 'log_buffer'
else pool
end name,
bytes/1024/1024 mb,
case when name <> 'free memory'
then bytes/1024/1024
end inuse
from v$sgastat
)group by name;


------------------------Free SGA-----------------

select name, round(sum(mb),1) mb, round(sum(inuse),1) free
from (select case when name = 'buffer_cache'
then 'db_cache_size'
when name = 'log_buffer'
then 'log_buffer'
else pool
end name,
bytes/1024/1024 mb,
case when name = 'free memory'
then bytes/1024/1024
end inuse
from v$sgastat
)group by name;

--------------------

select name,value from v$parameter where name ='sort_area_size';
---------------------------------- maximum PGA usage per process:--
select
max(pga_used_mem) max_pga_used_mem
, max(pga_alloc_mem) max_pga_alloc_mem
, max(pga_max_mem) max_pga_max_mem
from v$process
/

-----------sum of all current PGA usage per process---------
select
sum(pga_used_mem) sum_pga_used_mem
, sum(pga_alloc_mem) sum_pga_alloc_mem
, sum(pga_max_mem) sum_pga_max_mem
from v$process
/

-----------pga requirement as per high water mark

select
(select highwater from dba_high_water_mark_statistics where name = ('SESSIONS'))*(2048576+a.value+b.value)/1024/1024 pga_size_MB
from
v$parameter a,
v$parameter b
where
a.name = 'sort_area_size'
and
b.name = 'hash_area_size'
;


Thursday, February 9, 2012

Invalid Object Why?????????????

SELECT owner || '.' || object_name invalid_object,'--- ' || object_type || ' ---' likely_reason
FROM dba_objects WHERE status = 'INVALID' AND owner = 'LDBO'
UNION
SELECT d.owner || '.' || d.name,'Non-existent referenced db link ' || d.referenced_link_name
FROM dba_dependencies d WHERE NOT EXISTS
(
SELECT 'x'
FROM dba_db_links WHERE owner IN ('PUBLIC', d.owner)
AND db_link = d.referenced_link_name
)
AND d.referenced_link_name IS NOT NULL
AND (d.owner, d.name, d.type) IN
(
SELECT owner, object_name, object_type
FROM dba_objects WHERE status = 'INVALID'
)
AND d.owner = 'LDBO'
UNION
SELECT d.owner || '.' || d.name,'Depends on invalid ' || d.referenced_type || ' '|| d.referenced_owner || '.' || d.referenced_name
FROM dba_objects ro,dba_dependencies d
WHERE ro.status = 'INVALID' AND ro.owner = d.referenced_owner AND ro.object_name = d.referenced_name
AND ro.object_type = d.referenced_type AND d.referenced_link_name IS NULL
AND (d.owner, d.name, d.type) in
(
SELECT owner, object_name, object_type
FROM dba_objects
WHERE status = 'INVALID'
)
AND d.owner = 'LDBO'
UNION
SELECT d.owner || '.' || d.name,'Depends on newer ' || d.referenced_type || ' '|| d.referenced_owner || '.' || d.referenced_name
FROM dba_objects ro,dba_dependencies d,dba_objects o
WHERE NVL(ro.last_ddl_time, ro.created) > NVL(o.last_ddl_time, o.created)
AND ro.owner = d.referenced_owner AND ro.object_name = d.referenced_name
AND ro.object_type = d.referenced_type AND d.referenced_link_name IS NULL
AND d.owner = o.owner AND d.name = o.object_name AND d.type = o.object_type
AND o.status = 'INVALID' AND d.owner = 'LDBO'
UNION
SELECT d.owner || '.' || d.name,'Depends on ' || d.referenced_type || ' '|| d.referenced_owner || '.' || d.referenced_name
|| DECODE(d.referenced_link_name,NULL, '','@' || d.referenced_link_name)
FROM dba_dependencies d WHERE d.referenced_owner != 'PUBLIC' -- Public synonyms generate noise
AND d.referenced_type = 'NON-EXISTENT'
AND (d.owner, d.name, d.type) IN
(
SELECT owner, object_name, object_type
FROM dba_objects WHERE status = 'INVALID'
)
AND owner = 'LDBO'
UNION
SELECT d.owner || '.' || d.name invalid_object,'No privilege on referenced ' || d.referenced_type || ' '
|| d.referenced_owner || '.' || d.referenced_name
FROM dba_objects ro,dba_dependencies d
WHERE NOT EXISTS
(
SELECT 'x' FROM dba_tab_privs p WHERE p.owner = d.referenced_owner
AND p.table_name = d.referenced_name AND p.grantee IN ('PUBLIC', d.owner)
)
AND ro.status = 'VALID'
AND ro.owner = d.referenced_owner
AND ro.object_name = d.referenced_name
AND d.referenced_link_name IS NOT NULL
AND (d.owner, d.name, d.type) IN
(
SELECT owner, object_name, object_type
FROM dba_objects WHERE status = 'INVALID'
)
AND d.owner = 'LDBO'
UNION
SELECT o.owner || '.' || o.object_name, e.text
FROM dba_errors e, dba_objects o
WHERE e.text LIKE 'PLS-%' AND e.owner = o.owner AND e.name = o.object_name
AND e.type = o.object_type AND o.status = 'INVALID' AND o.owner = 'LDBO'
/

Wednesday, February 8, 2012

Analyze Scheduling using oracle




---------------------------------------------------------------------frequency 1 day-----------------------------------------

BEGIN
DBMS_SCHEDULER.create_job (
job_name => 'analyze',
job_type => 'PLSQL_BLOCK',
job_action => 'BEGIN DBMS_STATS.gather_schema_stats(''LDBO'',CASCADE=>TRUE); END;',
start_date => '01-APR-12 11.00.00 PM ASIA/CALCUTTA',
repeat_interval => 'freq=DAILY',
end_date => '02-APR-13 11.00.00 PM ASIA/CALCUTTA',
enabled => TRUE,
comments => 'JOB to gather LDBO statistics');
END;
/


----------------- frequency 2 hours---------------------------------------

BEGIN
DBMS_SCHEDULER.create_job (
job_name => 'analyze1',
job_type => 'PLSQL_BLOCK',
job_action => 'BEGIN DBMS_STATS.gather_schema_stats(''LDBO'',CASCADE=>TRUE); END;',
start_date => '16-FEB-12 06.00.00 PM ASIA/CALCUTTA',
repeat_interval=> 'FREQ=HOURLY;INTERVAL=2',
end_date => '02-APR-13 11.00.00 PM ASIA/CALCUTTA',
enabled => TRUE,
comments => 'JOB to gather LDBO statistics every 2 hours');
END;
/

------------------------------------------frequency syntax

FREQ=[YEARLY | MONTHLY | WEEKLY | DAILY | HOURLY | MINUTELY | SECONDLY] ;


-------------------To run a job every Tuesday at 11:25

FREQ=DAILY; BYDAY=TUE; BYHOUR=11; BYMINUTE=25;

FREQ=WEEKLY; BYDAY=TUE; BYHOUR=11; BYMINUTE=25;

FREQ=YEARLY; BYDAY=TUE; BYHOUR=11; BYMINUTE=25;



------------------ To run a job Tuesday and Thursday at 11, 14 and 22 o'clock

FREQ=WEEKLY; BYDAY=TUE,THU; BYHOUR=11,14,22;

EXPDP Data Pump Job Scheduling with rename dump and remove old files

1) create directory export_auto as 'd:\expdp1213';

create user dba_export_user identified by test123;

grant connect, create database link, resource, create view to dba_export_user;
grant unlimited tablespace to dba_export_user;
grant exp_full_database to dba_export_user;
grant read,write on directory export_auto to dba_export_user;
grant execute on dbms_flashback to dba_export_user;
grant create table to dba_export_user;
grant FLASHBACK ANY TABLE to dba_export_user;


2)

CREATE OR REPLACE PROCEDURE dba_export_user.start_export
IS
hdl_job NUMBER;
l_cur_scn NUMBER;
l_job_state VARCHAR2 (20);
l_status SYS.ku$_status1010;
l_job_status SYS.ku$_jobstatus1010;
BEGIN

begin
execute immediate 'drop table dba_export_user.AUTO_EXPORT';
exception when others then null;
end;

hdl_job := DBMS_DATAPUMP.OPEN ( operation => 'EXPORT', job_mode => 'FULL', job_name => 'AUTO_EXPORT' );
DBMS_DATAPUMP.add_file (handle => hdl_job,filename => 'exp1213.dmp',directory => 'EXPORT_AUTO',filetype => DBMS_DATAPUMP.ku$_file_type_dump_file);
DBMS_DATAPUMP.add_file (handle => hdl_job,filename => 'export.log',DIRECTORY => 'EXPORT_AUTO',filetype => DBMS_DATAPUMP.ku$_file_type_log_file);
DBMS_DATAPUMP.start_job (handle => hdl_job);
DBMS_DATAPUMP.wait_for_job (handle => hdl_job, job_state => l_job_state);
DBMS_OUTPUT.put_line ('Job exited with status:' || l_job_state);

DBMS_DATAPUMP.detach(handle => hdl_job);

----------------------RENAME BACKUP WITH DATE
begin
UTL_FILE.FRENAME ('EXPORT_AUTO','exp1213.DMP','EXPORT_AUTO','exp1213'||'_'||TO_CHAR(SYSDATE,'DDMMYYYY')||'.DMP');
end;

begin
UTL_FILE.FRENAME ('EXPORT_AUTO','export.log','EXPORT_AUTO','export'||'_'||TO_CHAR(SYSDATE,'DDMMYYYY')||'.LOG');
end;

------------DELETE TWO DAYS BEFORE BACKUP
begin
UTL_FILE.FREMOVE ('EXPORT_AUTO','exp1213'||'_'||TO_CHAR(SYSDATE-2,'DDMMYYYY')||'.DMP');
end;

begin
UTL_FILE.FREMOVE ('EXPORT_AUTO','export'||'_'||TO_CHAR(SYSDATE-2,'DDMMYYYY')||'.log');
end;

END;
/


3) Create the scheduler job (change the time and date as needed)

begin
dbms_scheduler.create_job(
job_name => 'EXPORT_JOB'
,job_type => 'STORED_PROCEDURE'
,job_action => 'dba_export_user.start_export'
,start_date => '08-FEB-12 06.02.00.00 PM ASIA/CALCUTTA'
,repeat_interval => 'FREQ=DAILY; BYDAY=MON,TUE,WED,THU,FRI,SAT,SUN;'
,enabled => TRUE
,comments => 'EXPORT_DATABASE_JOB');
end;
/


Note: The dump file is renamed with the date on a daily basis, before the next scheduled run

manually execute backup job
EXEC dba_export_user.start_export;

check running job status
select * from DBA_datapump_jobs;

drop job
EXEC dbms_scheduler.drop_job('EXPORT_JOB');

Monday, February 6, 2012

ORACLE AUDIT FOR ALTER COMMAND



CREATE TABLE DBA_AUDIT_TAB_KSH (USERNAME VARCHAR2(10), SQL_TEXT VARCHAR2(2000),TIMESTAMP DATE);

CREATE OR REPLACE TRIGGER DBA_AUDIT_KSH
BEFORE ALTER ON SCHEMA
DECLARE
sql_text ora_name_list_t;
stmt VARCHAR2(2000);
n integer;
dt date;
BEGIN
IF (ora_dict_obj_type IN ('TABLE'))
then
n := ora_sql_txt(sql_text);
FOR i IN 1..n LOOP
stmt := stmt || sql_text(i);
END LOOP;
dt := SYSDATE; -- SYSDATE is already a DATE; no TO_DATE conversion needed
INSERT INTO DBA_AUDIT_TAB_KSH (username,sql_text,timestamp) VALUES (user,stmt,dt);

END IF;
END DBA_AUDIT_KSH;
/
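
A quick test, using a hypothetical table T owned by the audited schema:

alter table t add (dummy_col number); -- fires the BEFORE ALTER trigger
select username, sql_text, timestamp from dba_audit_tab_ksh;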


Saturday, February 4, 2012

Performance Tuning Basic Guidelines

** Redo Log files – ensure that redo log are allocated on the fast disk, with minimum activities.
** Temporary tablespaces – ensure that temporary tablespaces are allocated on the fast disk, with minimum activities.
** Fragmentation of tablespaces – defragment tablespaces; keep INITIAL and NEXT extent sizes equal.
** Shared Pool Sizing – 1/3 or more of total physical memory, and check for thrashing/paging/swapping activity.
** DB_BLOCK_BUFFER – to enable buffering of data from datafiles during query and updates/inserts operation.
** Use BIND variables – to minimize parsing of SQL and enable SQL area reuse, and standardize bind-variable naming conventions.
** Identical SQL statements – literally identical – to enable SQL area reuse.
** Initial/Next Extents sizing – ensure initial and next are the same. Should be as small as possible to avoid wastage of space, but at the same time large enough to minimize time spent in frequent allocation.
** PCTINCREASE – zero to ensure minimum fragmentization.
** Small PCTUSED and large PCTFREE – to ensure sufficient spaces for INSERT intensive operation.
** Freelist groups – large values to ensure parallelization of INSERT-intensive operation.
** INITRANS and MAXTRANS – large values to enable large number of concurrent transactions to access tables.
** Readonly tablespaces – to minimize latches/enqueues resources, as well as PINGING in OPS.
** Create indexes for frequently accessed columns – especially for range scanning and equality conditions in “where” clause.
** Use hash indexes if equality conditions is used, and no range scanning involved.
** If joining of tables is used frequently, consider Composite Indexes.
** Use Clustered tables – columns allocated together.
** Create Index-Organized Tables when data is mostly readonly – to localize both the data and indexes together.
** Use PARALLEL hints to make sure Oracle parallel query is used.
** IO slaves – to enable multiple DB writers to write to disks.
** Minextents and Maxextents sizing – ensure as large as possible to enable preallocation.
** Avoid RAID 5 for I/O-intensive files (redo logs, archive logs, temporary tablespaces, RBS, etc.).
** MTS (shared server) mode – optimizes OLTP transactions, but not batch environments.
** Partition elimination – allows tablespaces holding unused partitions to be archived.
** Performance suffers seriously when bitmap indexes are used on tables with heavy DML; you might have to drop and recreate the bitmap indexes.
** Increase LOG_SIMULTANEOUS_COPIES – minimize contention for redo copy latches.
** In SQL*Loader – use direct-path rather than conventional-path loading.
** Use parallel INSERT ... SELECT when inserting data that already exists in another table in the database – faster than a parallel direct load with SQL*Loader.
** Create tables/indexes with the UNRECOVERABLE option to minimize redo log generation. SQL*Loader can likewise load unrecoverable, or ARCHIVELOG can be disabled.
** Alter index REBUILD parallel 2 – to enable 2 parallel processes to index concurrently.
** Use large redo log files to minimize log switching frequency.
** Loading with SQL*Loader is faster if the data source file is presorted.
** Drop the indexes and disable all constraints when using SQL*Loader; recreate the indexes after the load completes.
** Use star queries for data-warehouse-like applications: /*+ ORDERED USE_NL(facts) INDEX(facts fact_concat) */ or /*+ STAR */.
** Using Parallel DDL statements in:
** CREATE INDEX
** CREATE TABLE ... AS SELECT
** ALTER INDEX ... REBUILD
** The parallel DDL statements for partitioned tables and indexes are:
** CREATE TABLE ... AS SELECT
** CREATE INDEX
** ALTER TABLE ... MOVE PARTITION
** ALTER TABLE ... SPLIT PARTITION
** ALTER INDEX ... REBUILD PARTITION
** ALTER INDEX ... SPLIT PARTITION
** Parallel analyze on partitioned table - ANALYZE {TABLE,INDEX} PARTITION.
** Use asynchronous replication instead of synchronous replication.
** Create snapshot log to enable fast-refreshing.
** In Replication, use parallel propagation to setup multiple data streams.
** Use ALTER SESSION ... SET HASH_JOIN_ENABLED = TRUE.
** Use ALTER SESSION ENABLE PARALLEL DML.
** Use ANALYZE TABLE ... ESTIMATE STATISTICS for large tables, and COMPUTE STATISTICS for small tables, to create the statistics that let the cost-based optimizer make more accurate decisions on the optimization technique for the query.
** To reduce contention on the rollback segments, at most 2 parallel process transactions should reside in the same rollback segment.
** To speed up transaction recovery, the initialization parameter CLEANUP_ROLLBACK_ENTRIES should be set to a high value, approximately equal to the number of rollback entries generated by the forward-going operation.
** Using raw devices/partition instead of file system partition.
** Consider increasing the various sort related parameters:
** sort_area_size
** sort_area_retained_size
** sort_direct_writes
** sort_write_buffers
** sort_write_buffer_size
** sort_spacemap_size
** sort_read_fac
** Tune the database buffer cache parameter BUFFER_POOL_KEEP and BUFFER_POOL_RECYCLE to keep the buffer cache after use, or age out the data blocks to recycle the buffer cache for other data.
** Larger values of LOG_BUFFER reduce log file I/O, particularly if transactions are long or numerous. The default setting is four times the maximum data block size for the host operating system.
** DB_BLOCK_SIZE should be multiple of OS block size.
** SHARED_POOL_SIZE –The size in bytes of the area devoted to shared SQL and PL/SQL statements.
** The LOCK_SGA and LOCK_SGA_AREAS parameters lock the entire SGA or particular SGA areas into physical memory.
** You can force Oracle to load the entire SGA into main memory by setting PRE_PAGE_SGA=TRUE in the init.ora file. This slows your startup process slightly, but eliminates cache misses on the library cache and data dictionary cache during normal runs.
** Enable DB_BLOCK_CHECKSUM if automatic checksum on datablocks is needed, performance will be degraded slightly.
** Use EXPLAIN PLAN to understand how Oracle processes the query – utlxplan.sql (see the sketch after this list).
** Choose between the FIRST_ROWS and ALL_ROWS hints in an individual SQL statement to determine the best response time required for returning data.
** Use bitmap indexes for low-cardinality data.
** Use a full-table scan when the selected data ranges over a large percentage of the table.
** Use DB_FILE_MULTIBLOCK_READ_COUNT – lets a full table scan read multiple blocks with a single I/O. Increase this value if full table scans are desired.
** Check whether row migration or row chaining has occurred – by running utlchain.sql.
** Choose between offline backup or online backup plan.
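
As a minimal sketch of the bind-variable and EXPLAIN PLAN items above (EMP and :empno are hypothetical names; run utlxplan.sql first if PLAN_TABLE does not already exist):

VARIABLE empno NUMBER

EXPLAIN PLAN FOR
SELECT * FROM emp WHERE empno = :empno;

SELECT * FROM TABLE(dbms_xplan.display);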

Monday, January 2, 2012

ORA-01114: IO error writing block to file 202 (block # 1473756)

Linux-x86_64 Error: 25: Inappropriate ioctl for device


ERROR at line 29:
ORA-01114: IO error writing block to file 202 (block # 1473756)
ORA-27072: File I/O error
Linux-x86_64 Error: 25: Inappropriate ioctl for device
Additional information: 4
Additional information: 1473756
Additional information: 90112


---------------
select tablespace_name,file_name,bytes/1024/1024,status,autoextensible,increment_by,user_bytes/1024/1024 from dba_temp_files;

----------------------------------------------Existing Temp Tablespace------------------------------------

FILE_NAME                                          BYTES MB STATUS AUT INCREMENT_BY USER_BYTES MB
-------------------------------------------------- -------- ------ --- ------------ -------------
/ldccm_data1/ora11g/ldccmd/temp/ldccm_temp01.dbf       1024 ONLINE NO             0          1023
/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp02.dbf    23028 ONLINE YES            1         23027
/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp03.dbf    23026 ONLINE YES            1         23025
/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp04.dbf    23026 ONLINE YES            1         23025
/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp05.dbf    23026 ONLINE YES            1         23025
/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp06.dbf    23026 ONLINE YES            1         23025



---------------------------------Solution-------------------


1)
CREATE TEMPORARY TABLESPACE temp2
TEMPFILE '/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp10.dbf' SIZE 1024M REUSE
AUTOEXTEND ON NEXT 500M MAXSIZE unlimited
EXTENT MANAGEMENT LOCAL UNIFORM SIZE 500M;

2)
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temp2;

3)
DROP TABLESPACE temporary INCLUDING CONTENTS AND DATAFILES;


---if the drop hangs, restart the Oracle services or do the following

SELECT b.tablespace,b.segfile#,b.segblk#,b.blocks,a.sid,a.serial#,
a.username,a.osuser, a.status
FROM v$session a,v$sort_usage b
WHERE a.saddr = b.session_addr;

ALTER SYSTEM KILL SESSION '<sid>,<serial#>'; -- kill only those sessions that are not actually in use

Now drop the previous tablespace:
DROP TABLESPACE temporary INCLUDING CONTENTS AND DATAFILES;


4)

CREATE TEMPORARY TABLESPACE temporary
TEMPFILE '/ldccm_temptbs/ora11g/ldccmd/temp/ldccm_temp01.dbf' SIZE 20480M REUSE
AUTOEXTEND ON NEXT 1024M;

5)
ALTER DATABASE DEFAULT TEMPORARY TABLESPACE temporary;

6)
DROP TABLESPACE temp2 INCLUDING CONTENTS AND DATAFILES;
7)

SELECT tablespace_name, file_name, bytes
FROM dba_temp_files WHERE tablespace_name = 'TEMPORARY';
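
To confirm which temporary tablespace is now the database default (DATABASE_PROPERTIES is a standard dictionary view):

SELECT property_name, property_value
FROM database_properties
WHERE property_name = 'DEFAULT_TEMP_TABLESPACE';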


-------------------------------------------
