Wednesday, March 23, 2011

Daily Client Brokerage with Branch

---------------------Daily Client Brokerage with Branch----------------------------------
select distinct ClientCode,Brcode,Dtoftran, sum(Grosspurch) Purchases, sum(Grosssales) Sales, sum(Totalvol) GrossTurnover, sum(Netpsvalue) NetTurnover, sum(Brokerage) Brokerage, sum(Volper), sum(NetBrokerage), sum(Netvolper), sum(nTurnpie), sum(nGrossbrkpie),sum(nNetbrkpie) from (Select sauda.oowncode ClientCode,sauda.brcode Brcode,Sauda.Dtoftran as Dtoftran, Sum(Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosspurch,Sum(Decode(Sauda.Buysell,'S',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosssales,Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Orignalno,0, Sauda.Quantity*Sauda.Market,0))) as Totalvol, Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Buysell,'B', Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0), Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market*-1,0)))) as Netpsvalue,Sum((Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Final_rat1-Sauda.Oldrate),Sauda.Quantity*(Sauda.Final_rat1-Sauda.Market)),Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Oldrate-Sauda.Final_rat1),Sauda.Quantity*(Sauda.Market-Sauda.Final_rat1))))+Sauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From Sauda Sauda Where Sauda.Firmnumber='SNS-000001' and Sauda.Egroup!='MB' and Sauda.Egroup!='YC' and Sauda.Egroup!='MO' and Sauda.Egroup!='OK' and Sauda.Egroup!='ST' and Sauda.Sterminal!='99999' and Sauda.Sterminal!='99998' and trim(dtoftran)=trunc(SYSDATE) Group by Sauda.Dtoftran,sauda.oowncode,sauda.brcode Union Select fosauda.oowncode ClientCode,fosauda.brcode Brcode,foSauda.Dtoftran as Dtoftran, Sum(Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0)),0)) as Grosspurch,Sum(Decode(foSauda.Buysell,'S',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0)),0)) as Grosssales,Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Orignalno,0, foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0))) as Totalvol, Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Buysell,'B', Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0), Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice)*-1,0)))) as Netpsvalue,Sum((Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Final_rat1-foSauda.Oldrate),foSauda.Quantity*(foSauda.Final_rat1-foSauda.Market)),Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Oldrate-foSauda.Final_rat1),foSauda.Quantity*(foSauda.Market-foSauda.Final_rat1))))+foSauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From foSauda foSauda Where foSauda.Firmnumber='SNS-000001' and foSauda.Egroup!='MB' and foSauda.Egroup!='YC' and foSauda.Egroup!='MO' and foSauda.Egroup!='OK' and foSauda.Egroup!='ST' and foSauda.Sterminal!='99999' and foSauda.Sterminal!='99998' and fosauda.saudatype != 'B/' and fosauda.oowncode!='F12838' and trim(dtoftran)=trunc(SYSDATE) Group by foSauda.Dtoftran,fosauda.oowncode,fosauda.brcode ) group by clientcode,brcode,dtoftran
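The nested DECODEs above are hard to audit on one line. For reference only, the gross-purchase term is equivalent to this CASE form (same Sauda columns; a readability sketch, not a change to the query — NULL handling differs slightly from DECODE):

-- equivalent of: Decode(Buysell,'B', Decode(Saudatype,'N',0, Decode(Orignalno,0, Quantity*Market, 0)), 0)
SUM(CASE
      WHEN sauda.buysell = 'B'        -- buy side only
       AND sauda.saudatype <> 'N'     -- saudatype 'N' rows are excluded
       AND sauda.orignalno = 0        -- original trades only
      THEN sauda.quantity * sauda.market
      ELSE 0
    END) AS grosspurch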

-----------Total_Brokerage_Datewise-----------------
select sum(brokerage) "Total Brokerage",dtoftran "Date" from (Select /*+ FULL(Sauda) */ Sauda.Dtoftran as Dtoftran, Sum(Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosspurch,Sum(Decode(Sauda.Buysell,'S',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosssales,Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Orignalno,0, Sauda.Quantity*Sauda.Market,0))) as Totalvol, Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Buysell,'B', Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0), Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market*-1,0)))) as Netpsvalue,Sum((Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Final_rat1-Sauda.Oldrate),Sauda.Quantity*(Sauda.Final_rat1-Sauda.Market)),Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Oldrate-Sauda.Final_rat1),Sauda.Quantity*(Sauda.Market-Sauda.Final_rat1))))+Sauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From Sauda Sauda Where Sauda.Firmnumber='SNS-000001' and Sauda.Egroup!='MB' and Sauda.Egroup!='YC' and Sauda.Egroup!='MO' and Sauda.Egroup!='OK' and Sauda.Egroup!='ST' and Sauda.Sterminal!='99999' and Sauda.Sterminal!='99998' Group by Sauda.Dtoftran Union Select /*+ FULL(foSauda) */ foSauda.Dtoftran as Dtoftran, Sum(Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*foSauda.Market,0)),0)) as Grosspurch,Sum(Decode(foSauda.Buysell,'S',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*foSauda.Market,0)),0)) as Grosssales,Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Orignalno,0, foSauda.Quantity*foSauda.Market,0))) as Totalvol, Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Buysell,'B', Decode(foSauda.Orignalno,0,foSauda.Quantity*foSauda.Market,0), Decode(foSauda.Orignalno,0,foSauda.Quantity*foSauda.Market*-1,0)))) as Netpsvalue,Sum((Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Final_rat1-foSauda.Oldrate),foSauda.Quantity*(foSauda.Final_rat1-foSauda.Market)),Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Oldrate-foSauda.Final_rat1),foSauda.Quantity*(foSauda.Market-foSauda.Final_rat1))))+foSauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From foSauda foSauda Where foSauda.Firmnumber='SNS-000001' and foSauda.Egroup!='MB' and foSauda.Egroup!='YC' and foSauda.Egroup!='MO' and foSauda.Egroup!='OK' and foSauda.Egroup!='ST' and foSauda.Sterminal!='99999' and foSauda.Sterminal!='99998' Group by foSauda.Dtoftran ) group by dtoftran order by 2 desc

--------------------Daily Branch Brokerage--------------
select distinct Brcode,Dtoftran, sum(Grosspurch) Purchases, sum(Grosssales) Sales, sum(Totalvol) GrossTurnover, sum(Netpsvalue) NetTurnover, sum(Brokerage) Brokerage, sum(Volper), sum(NetBrokerage), sum(Netvolper), sum(nTurnpie), sum(nGrossbrkpie),sum(nNetbrkpie) from (Select sauda.brcode Brcode,Sauda.Dtoftran as Dtoftran, Sum(Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosspurch,Sum(Decode(Sauda.Buysell,'S',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosssales,Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Orignalno,0, Sauda.Quantity*Sauda.Market,0))) as Totalvol, Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Buysell,'B', Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0), Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market*-1,0)))) as Netpsvalue,Sum((Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Final_rat1-Sauda.Oldrate),Sauda.Quantity*(Sauda.Final_rat1-Sauda.Market)),Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Oldrate-Sauda.Final_rat1),Sauda.Quantity*(Sauda.Market-Sauda.Final_rat1))))+Sauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From Sauda Sauda Where Sauda.Firmnumber='SNS-000001' and Sauda.Egroup!='MB' and Sauda.Egroup!='YC' and Sauda.Egroup!='MO' and Sauda.Egroup!='OK' and Sauda.Egroup!='ST' and Sauda.Sterminal!='99999' and Sauda.Sterminal!='99998' and trim(dtoftran)=trunc(SYSDATE-1) Group by Sauda.Dtoftran,sauda.brcode Union Select fosauda.brcode Brcode,foSauda.Dtoftran as Dtoftran, Sum(Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0)),0)) as Grosspurch,Sum(Decode(foSauda.Buysell,'S',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0)),0)) as Grosssales,Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Orignalno,0, foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0))) as Totalvol, Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Buysell,'B', Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0), Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice)*-1,0)))) as Netpsvalue,Sum((Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Final_rat1-foSauda.Oldrate),foSauda.Quantity*(foSauda.Final_rat1-foSauda.Market)),Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Oldrate-foSauda.Final_rat1),foSauda.Quantity*(foSauda.Market-foSauda.Final_rat1))))+foSauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From foSauda foSauda Where foSauda.Firmnumber='SNS-000001' and foSauda.Egroup!='MB' and foSauda.Egroup!='YC' and foSauda.Egroup!='MO' and foSauda.Egroup!='OK' and foSauda.Egroup!='ST' and foSauda.Sterminal!='99999' and foSauda.Sterminal!='99998' and fosauda.saudatype != 'B/' and fosauda.oowncode!='F12838' and trim(dtoftran)=trunc(SYSDATE-1) Group by foSauda.Dtoftran,fosauda.oowncode,fosauda.brcode ) group by brcode,dtoftran

------------------------Daily Family Brokerage------------------

select distinct family,Dtoftran, sum(Grosspurch) Purchases, sum(Grosssales) Sales, sum(Totalvol) GrossTurnover, sum(Netpsvalue) NetTurnover, sum(Brokerage) Brokerage, sum(Volper), sum(NetBrokerage), sum(Netvolper), sum(nTurnpie), sum(nGrossbrkpie),sum(nNetbrkpie) from (Select ldfibs.family,Sauda.Dtoftran as Dtoftran, Sum(Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosspurch,Sum(Decode(Sauda.Buysell,'S',Decode(Sauda.Saudatype,'N' ,0, Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0)),0)) as Grosssales,Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Orignalno,0, Sauda.Quantity*Sauda.Market,0))) as Totalvol, Sum(Decode(Sauda.Saudatype,'N' ,0,Decode(Sauda.Buysell,'B', Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market,0), Decode(Sauda.Orignalno,0,Sauda.Quantity*Sauda.Market*-1,0)))) as Netpsvalue,Sum((Decode(Sauda.Buysell,'B',Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Final_rat1-Sauda.Oldrate),Sauda.Quantity*(Sauda.Final_rat1-Sauda.Market)),Decode(Sauda.Saudatype,'N',Sauda.Quantity*(Sauda.Oldrate-Sauda.Final_rat1),Sauda.Quantity*(Sauda.Market-Sauda.Final_rat1))))+Sauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From Sauda Sauda,ldfibs ldfibs Where Sauda.Firmnumber='SNS-000001' and Sauda.Egroup!='MB' and Sauda.Egroup!='YC' and Sauda.Egroup!='MO' and Sauda.Egroup!='OK' and Sauda.Egroup!='ST' and Sauda.Sterminal!='99999' and Sauda.Sterminal!='99998' and trim(dtoftran)=trunc(SYSDATE-1) and sauda.oowncode=ldfibs.oowncode and sauda.brcode=ldfibs.brcode Group by ldfibs.family,Sauda.Dtoftran Union Select ldfibs.family,foSauda.Dtoftran as Dtoftran, Sum(Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0)),0)) as Grosspurch,Sum(Decode(foSauda.Buysell,'S',Decode(foSauda.Saudatype,'N' ,0, Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0)),0)) as Grosssales,Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Orignalno,0, foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0))) as Totalvol, Sum(Decode(foSauda.Saudatype,'N' ,0,Decode(foSauda.Buysell,'B', Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice),0), Decode(foSauda.Orignalno,0,foSauda.Quantity*(foSauda.Market + foSauda.strikeprice)*-1,0)))) as Netpsvalue,Sum((Decode(foSauda.Buysell,'B',Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Final_rat1-foSauda.Oldrate),foSauda.Quantity*(foSauda.Final_rat1-foSauda.Market)),Decode(foSauda.Saudatype,'N',foSauda.Quantity*(foSauda.Oldrate-foSauda.Final_rat1),foSauda.Quantity*(foSauda.Market-foSauda.Final_rat1))))+foSauda.Brokpercontract) as Brokerage,000.0000 as Volper,0000000000.00 as NetBrokerage,000.0000 as Netvolper, 000.0000 as nTurnpie,000.0000 as nGrossbrkpie,000.0000 as nNetbrkpie From foSauda foSauda, ldfibs ldfibs Where foSauda.Firmnumber='SNS-000001' and foSauda.Egroup!='MB' and foSauda.Egroup!='YC' and foSauda.Egroup!='MO' and foSauda.Egroup!='OK' and foSauda.Egroup!='ST' and foSauda.Sterminal!='99999' and foSauda.Sterminal!='99998' and fosauda.saudatype != 'B/' and fosauda.oowncode!='F12838' and fosauda.oowncode=ldfibs.oowncode and fosauda.brcode=ldfibs.brcode and trim(dtoftran)=trunc(SYSDATE-1) Group by ldfibs.family,foSauda.Dtoftran,fosauda.oowncode ) group by family,dtoftran

Trade Confirmation Query

---------------------bse-------------------------
XLODBC
1
DSN=sns1011odbc;UID=;Pwd=;ConnectString=sns1011srv;
SELECT sauda.exchcode, sauda.vallan, sauda.brcode, sauda.oowncode, a.fibsacct "Name", aa.cstdno, aa.tel1, aa.tel2, aa.mobile,aaa.email,sauda.compcode "Scrip Code", company.fibsacct "Scrip Name", SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0)) "P.QTY", ROUND(SUM(decode(sauda.buysell, 'B', decode(sauda.saudatype, 'N', (sauda.market -sauda.havala_rate) *sauda.quantity, sauda.quantity *sauda.market), 0)) / decode(SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0)), 0, 1, SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0))), 2) "P.avg",SUM(decode(sauda.buysell, 'S', sauda.quantity, 0)) "S.QTY", ROUND(SUM(decode(sauda.buysell, 'S', decode(sauda.saudatype, 'N', (sauda.market -sauda.havala_rate) *sauda.quantity, sauda.quantity *sauda.market), 0)) / SUM(decode(sauda.buysell, 'S', sauda.quantity)), 2) "S.avg" FROM sauda sauda, company company, ldbo.markclose m, ldbo.accounts a, ldbo.accountaddressdetail aa, ldbo.ACCOUNTEMAILDETAIL aaa WHERE sauda.firmnumber = 'SNS-000001' AND sauda.exchcode = 'BSE' AND sauda.dtoftran = TRUNC(sysdate) AND sauda.firmnumber = a.firmnumber AND sauda.oowncode = a.oowncode AND sauda.firmnumber = aa.firmnumber AND sauda.oowncode = aa.oowncode AND sauda.compcode = company.oowncode AND sauda.firmnumber = company.firmnumber AND sauda.firmnumber = m.firmnumber AND company.firmnumber = m.firmnumber AND sauda.compcode = m.code AND sauda.saudatype = 'N' and aa.oowncode=aaa.oowncode and aa.firmnumber=aaa.firmnumber GROUP BY sauda.exchcode, sauda.vallan, sauda.compcode, company.fibsacct, sauda.oowncode, m.market, sauda.vallan, a.fibsacct, aa.cstdno, aa.tel1, aa.tel2, aa.mobile,aaa.email, sauda.brcode, sauda.dtoftran ORDER BY sauda.oowncode


---------------------nse-------------------------

XLODBC
1
DSN=sns1011odbc;UID=;Pwd=;ConnectString=sns1011srv;
SELECT sauda.exchcode, sauda.vallan, sauda.brcode, sauda.oowncode, a.fibsacct "Name", aa.cstdno, aa.tel1, aa.tel2, aa.mobile,aaa.email,sauda.compcode "Scrip Code", company.fibsacct "Scrip Name", SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0)) "P.QTY", ROUND(SUM(decode(sauda.buysell, 'B', decode(sauda.saudatype, 'N', (sauda.market -sauda.havala_rate) *sauda.quantity, sauda.quantity *sauda.market), 0)) / decode(SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0)), 0, 1, SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0))), 2) "P.avg",SUM(decode(sauda.buysell, 'S', sauda.quantity, 0)) "S.QTY", ROUND(SUM(decode(sauda.buysell, 'S', decode(sauda.saudatype, 'N', (sauda.market -sauda.havala_rate) *sauda.quantity, sauda.quantity *sauda.market), 0)) / SUM(decode(sauda.buysell, 'S', sauda.quantity)), 2) "S.avg" FROM sauda sauda, ldbo.ACCOUNTEMAILDETAIL aaa, company company, ldbo.markclose m, ldbo.accounts a, ldbo.accountaddressdetail aa WHERE sauda.firmnumber = 'SNS-000001' AND sauda.exchcode = 'NSE' AND sauda.egroup IN('NN', 'NW') AND sauda.dtoftran = TRUNC(sysdate) AND sauda.firmnumber = a.firmnumber AND sauda.oowncode = a.oowncode AND sauda.firmnumber = aa.firmnumber AND sauda.oowncode = aa.oowncode AND sauda.compcode = company.oowncode AND sauda.firmnumber = company.firmnumber AND sauda.firmnumber = m.firmnumber AND company.firmnumber = m.firmnumber AND sauda.compcode = m.code AND sauda.saudatype = 'N' and aa.firmnumber=aaa.firmnumber and aa.oowncode=aaa.oowncode GROUP BY sauda.exchcode, sauda.vallan, sauda.compcode, company.fibsacct, sauda.oowncode, m.market, sauda.vallan, a.fibsacct, aa.cstdno, aa.tel1, aa.tel2, aa.mobile,aaa.email, sauda.brcode, sauda.dtoftran ORDER BY sauda.oowncode

------------------------nsef------------------------

SELECT fotransactions.exchcode,fotransactions.saudatype,a.brcode,fotransactions.oowncode,a.fibsacct,aa.cstdno,aa.tel1,aa.mobile,aaa.email,fotransactions.compcode "Scrip Code", company.fibsacct "Scrip Name",company.expirydate,fotransactions.strikeprice,fotransactions.optiontype,SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0)) "P.QTY",ROUND(SUM(decode(fotransactions.buysell, 'B', decode(fotransactions.saudatype, 'N',(fotransactions.market -fotransactions.havala_rate) *fotransactions.quantity, fotransactions.quantity *fotransactions.market), 0)) / decode(SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0)), 0, 1, SUM(decode(buysell, 'B', decode(egroup, 'NA', 0, quantity), 0))), 2) "P.avg",SUM(decode(fotransactions.buysell, 'S', fotransactions.quantity, 0)) "S.QTY",ROUND(SUM(decode(fotransactions.buysell, 'S', decode(fotransactions.saudatype, 'N',(fotransactions.market -fotransactions.havala_rate) *fotransactions.quantity, fotransactions.quantity *fotransactions.market), 0)) / SUM(decode(fotransactions.buysell, 'S', fotransactions.quantity)), 2) "S.avg" FROM ldbo.fotransactions fotransactions,company company,ldbo.accounts a,accountaddressdetail aa,ldbo.ACCOUNTEMAILDETAIL aaa WHERE fotransactions.firmnumber = 'SNS-000001' AND fotransactions.saudatype != 'B/' AND fotransactions.exchcode = 'NSEF' AND fotransactions.dtoftran = TRUNC(sysdate) AND fotransactions.firmnumber = a.firmnumber AND fotransactions.oowncode = a.oowncode AND fotransactions.firmnumber = aa.firmnumber AND fotransactions.oowncode = aa.oowncode AND fotransactions.compcode = company.oowncode AND fotransactions.firmnumber = company.firmnumber AND fotransactions.saudatype != 'B/' AND a.firmnumber = 'SNS-000001' and aa.oowncode=aaa.oowncode and aa.firmnumber=aaa.firmnumber GROUP BY fotransactions.exchcode,aa.cstdno,aa.tel1,aa.mobile,a.fibsacct,a.brcode,company.expirydate,fotransactions.compcode,fotransactions.saudatype,fotransactions.optiontype, company.fibsacct,fotransactions.strikeprice,fotransactions.oowncode,fotransactions.dtoftran,aaa.email ORDER BY fotransactions.oowncode

-----------------------nsec---------------------
select fotransactions.EXCHCODE,fotransactions.saudatype,A.BRCODE,fotransactions.OOWNCODE,A.FIBSACCT,Aa.CSTDNO,Aa.tel1,aa.mobile,aaa.email,fotransactions.compcode "Scrip Code",company.fibsacct "Scrip Name",company.EXPIRYDATE,fotransactions.strikeprice,fotransactions.OPTIONTYPE,ROUND(SUM(DECODE(BUYSELL,'B',DECODE(EGROUP,'NA',0,QUANTITY),0))/COMPANY.MARKLOT,2) "P.QTY",round(Sum(Decode(fotransactions.Buysell,'B',Decode(fotransactions.saudatype,'N' ,(fotransactions.MARKET-fotransactions.Havala_rate)*fotransactions.Quantity,fotransactions.Quantity*fotransactions.MARKET),0))/DECODE(SUM(DECODE(BUYSELL,'B',DECODE(EGROUP,'NA',0,QUANTITY),0)),0,1,SUM(DECODE(BUYSELL,'B',DECODE(EGROUP,'NA',0,QUANTITY),0))),4) "P.avg", ROUND(Sum(Decode(fotransactions.Buysell,'S',fotransactions.Quantity,0))/COMPANY.MARKLOT,2) "S.QTY", round(Sum(Decode(fotransactions.Buysell,'S',Decode(fotransactions.saudatype,'N' ,(fotransactions.MARKET-fotransactions.Havala_rate)*fotransactions.Quantity,fotransactions.Quantity*fotransactions.MARKET),0))/Sum(Decode(fotransactions.Buysell,'S',fotransactions.Quantity)),4) "S.avg" FROM ldbo.fotransactions fotransactions,COMPANY,ldbo.accounts a,ldbo.accountaddressdetail aa,ldbo.ACCOUNTEMAILDETAIL aaa WHERE fotransactions.FIRMNUMBER = 'SNS-000001' and fotransactions.saudatype !='B/' and fotransactions.EXCHCODE='NSEC'and fotransactions.dtoftran=TRUNC(sysdate -1) and fotransactions.firmnumber=a.firmnumber and fotransactions.oowncode=a.oowncode and fotransactions.firmnumber=aa.firmnumber and fotransactions.oowncode=aa.oowncode AND fotransactions.COMPCODE = COMPANY.OOWNCODE AND fotransactions.FIRMNUMBER = COMPANY.FIRMNUMBER and fotransactions.saudatype !='B/' and a.FIRMNUMBER = 'SNS-000001' and aa.oowncode=aaa.oowncode and aa.firmnumber=aaa.firmnumber GROUP BY fotransactions.EXCHCODE,Aa.CSTDNO,Aa.tel1,aa.mobile,aaa.email,COMPANY.MARKLOT,A.FIBSACCT,A.BRCODE,company.EXPIRYDATE,fotransactions.compcode,fotransactions.saudatype,fotransactions.OPTIONTYPE,COMPANY.FIBSACCT,fotransactions.strikeprice,fotransactions.OOWNCODE order by fotransactions.OOWNCODE

CLIENT NONTRADING DAYS CASH

Select Ldfibs.Oowncode as Oowncode,
       Ldfibs.Fibsacct as Fibsacct,
       Ldfibs.cTermcode as cTermcode,
       Ldfibs.Brcode as Brcode,
       Accountgensplit.Dopeningdate,
       Max(Sauda.Dtoftran) as dLastTradeDate,
       Round((Sysdate - Max(Sauda.Dtoftran) - 1), 0) as nNonTradeDays
from   Ldfibs Ldfibs,
       Sauda Sauda,
       Accountgensplit Accountgensplit,
       ( Select Ldfibs.Oowncode as Oowncode
         From   Ldfibs
         Where  Ldfibs.Firmnumber = 'ACML-00001'
         Minus
         Select Distinct Sauda.Oowncode as Oowncode
         From   Sauda
         Where  Sauda.Firmnumber = 'ACML-00001' ) tblLdfibs   -- clients with no trades at all
Where  Ldfibs.Firmnumber = 'ACML-00001'
and    Ldfibs.nfinancialyear = '2010'
and    Ldfibs.Firmnumber = Sauda.Firmnumber(+)
and    Ldfibs.Oowncode   = Sauda.Oowncode(+)
and    Ldfibs.Oowncode   = tblLdfibs.Oowncode(+)
and    Ldfibs.Firmnumber = Accountgensplit.Firmnumber(+)
and    Ldfibs.Oowncode   = Accountgensplit.Oowncode(+)
and    Accountgensplit.Dopeningdate is not null   -- note: this filter effectively undoes the outer join to Accountgensplit
Group by Ldfibs.Oowncode, Ldfibs.cTermcode, Ldfibs.Fibsacct, Ldfibs.Brcode, Accountgensplit.Dopeningdate ;

LTM Monitoring

select cexchcode EXCHANGE,
       --max(nordertime) OTIME,
       max(ntradetime) TTIME
from   TBLRKTRANHISTORY
where  trunc(dtransactiondate) = trunc(sysdate)
group by cexchcode;
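Side note: trim() on a DATE column forces an implicit character conversion and defeats any index on it (the same pattern appears in the brokerage queries above). A range predicate keeps the day filter index-friendly; a sketch against the same table:

select cexchcode EXCHANGE, max(ntradetime) TTIME
from   TBLRKTRANHISTORY
where  dtransactiondate >= trunc(sysdate)
and    dtransactiondate <  trunc(sysdate) + 1
group by cexchcode;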

Kill DLLhost from task Manager

When memory leaks occur in IIS scripts, you will find DLLHost.exe eating up a lot of memory and ultimately reaching a point where you will need to restart IIS to get the service working again. One way to contain this automatically is to use the Performance Logs and Alerts snap-in.

1. Create a batch file that does nothing but run tskill dllhost /a (see the sketch below).
2. Go to Control Panel | Administrative Tools | Performance.
3. Open up Performance Logs and Alerts.
4. Right-click on Alerts and click New Alert Settings. Name it “Auto-kill DLLHost”.
5. Click the “Add” button, choose the computer, choose Memory, and then create an alert for 1 GB.
6. In the Action tab, choose Run this Program and set it to run the batch file you created above.

Now, every time memory usage crosses 1 GB, the dllhost process is automatically killed.
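A minimal sketch of the batch file from step 1 (the file name is just an example; tskill takes the process name without the .exe extension):

@echo off
rem kill_dllhost.bat -- forcibly end all DLLHost processes (/a kills across all sessions)
tskill dllhost /a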


Monday, March 21, 2011

DataBase Template FY 11-12
[A DBCA database template (XML) was pasted here; the blog export stripped the tags and left only bare values. The recoverable settings: database type MULTIPURPOSE; character sets WE8MSWIN1252 (database) and AL16UTF16 (national); init.ora at {ORACLE_BASE}\admin\{DB_UNIQUE_NAME}\pfile\init.ora, spfile at {ORACLE_HOME}\database\spfile{SID}.ora; tablespaces INDX (35000 MB), SYSAUX (450 MB), SYSTEM (500 MB), TEMPORARY (2000 MB), UNDOTBS1 (2000 MB), USR (55000 MB); three redo log groups of 204800 KB each.]

Saturday, March 19, 2011

LD Branch Change Process

Branch Change Process

---------------------------------

File
Code|Branch
22500T001|2760
22500T002|2760
22500V001|2760
22500V002|2760
580817183|8230
580817105|8230
580817135|8230

---------------------------------

Disconnect all LD users

---------------------------------

Error
225000000 TIMIR PATEL (COMM) 2760 Journal Voucher JVAUTOJ3097426 contains duplicate Clients

Search for that JV in the ledger of client 225000000

and copy the journal register entry (Dr/Cr) for that JV.

If you forget, find it from MIS > Journal Audit Delete.


Delete Duplicate JVs
------------

Run the branch change again

---------------------------------

if ODBC error

Client Branch Code change could not be Synchronized.


then run

set serveroutput on ;
execute sp_serseq('2010-2011') ;


---------------------------------

Enter journal entries again for deleted JVs

----------------

Friday, March 18, 2011

Which file includes platform info about the Oracle server setup

\database\stage\prereq\prerequisite.properties

Thursday, March 17, 2011

Some instances on 10.2.0.1 and some on 10.2.0.3 on different servers; we have to create databases for all instances on one server

Scenario
Some instances run on 10.2.0.1 and some on 10.2.0.3 on different servers; we have to create databases for all of them on one server.


Install 10.2.0.1
Apply patch set 10.2.0.3

Before you apply the patchset, you have to check whether the Oracle version is 32-bit or 64-bit, because
the patchsets are different.


SQL> SHUTDOWN IMMEDIATE
Stop all the Oracle-related services,
e.g. Oracle Listener, Oracle DB Console, Oracle JobScheduler,
Distributed Transaction Coordinator.
Run the patch Setup.exe in the same Oracle Home.

------------------------startup upgrade for all instances------------
SQL> CONNECT SYS/oracle@ari0809srv AS SYSDBA

STARTUP UPGRADE
SPOOL c:\patch.log
@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\catupgrd.sql
SQL> SPOOL OFF

SHUTDOWN IMMEDIATE
STARTUP
------------------------

@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlip.sql
@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlrp.sql
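After utlrp.sql finishes, a quick sanity check that nothing was left invalid (standard dictionary view):

SQL> SELECT count(*) FROM dba_objects WHERE status = 'INVALID';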



mkdir D:\oracle\product\10.2.0\admin\ari0405\adump
mkdir D:\oracle\product\10.2.0\admin\ari0405\bdump
mkdir D:\oracle\product\10.2.0\admin\ari0405\cdump
mkdir D:\oracle\product\10.2.0\admin\ari0405\dpdump
mkdir D:\oracle\product\10.2.0\admin\ari0405\pfile
mkdir D:\oracle\product\10.2.0\admin\ari0405\udump


mkdir D:\oracle\product\10.2.0\admin\ari0506\adump
mkdir D:\oracle\product\10.2.0\admin\ari0506\bdump
mkdir D:\oracle\product\10.2.0\admin\ari0506\cdump
mkdir D:\oracle\product\10.2.0\admin\ari0506\dpdump
mkdir D:\oracle\product\10.2.0\admin\ari0506\pfile
mkdir D:\oracle\product\10.2.0\admin\ari0506\udump


mkdir D:\oracle\product\10.2.0\admin\ari0607\adump
mkdir D:\oracle\product\10.2.0\admin\ari0607\bdump
mkdir D:\oracle\product\10.2.0\admin\ari0607\cdump
mkdir D:\oracle\product\10.2.0\admin\ari0607\dpdump
mkdir D:\oracle\product\10.2.0\admin\ari0607\pfile
mkdir D:\oracle\product\10.2.0\admin\ari0607\udump

mkdir D:\oracle\product\10.2.0\admin\ari0708\adump
mkdir D:\oracle\product\10.2.0\admin\ari0708\bdump
mkdir D:\oracle\product\10.2.0\admin\ari0708\cdump
mkdir D:\oracle\product\10.2.0\admin\ari0708\dpdump
mkdir D:\oracle\product\10.2.0\admin\ari0708\pfile
mkdir D:\oracle\product\10.2.0\admin\ari0708\udump

mkdir D:\oracle\product\10.2.0\admin\ari0809\adump
mkdir D:\oracle\product\10.2.0\admin\ari0809\bdump
mkdir D:\oracle\product\10.2.0\admin\ari0809\cdump
mkdir D:\oracle\product\10.2.0\admin\ari0809\dpdump
mkdir D:\oracle\product\10.2.0\admin\ari0809\pfile
mkdir D:\oracle\product\10.2.0\admin\ari0809\udump


mkdir D:\oracle\product\10.2.0\admin\ari0910\adump
mkdir D:\oracle\product\10.2.0\admin\ari0910\bdump
mkdir D:\oracle\product\10.2.0\admin\ari0910\cdump
mkdir D:\oracle\product\10.2.0\admin\ari0910\dpdump
mkdir D:\oracle\product\10.2.0\admin\ari0910\pfile
mkdir D:\oracle\product\10.2.0\admin\ari0910\udump


Copy the init files to the database folder, and tnsnames.ora and listener.ora to the network\admin folder.


set ORACLE_SID=ari2

oradim -new -sid ari2 -SRVC OracleServiceari2 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initari2.ORA

set ORACLE_SID=ari3

oradim -new -sid ari3 -SRVC OracleServiceari3 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initari3.ORA


set ORACLE_SID=ari4
oradim -new -sid ari4 -SRVC OracleServiceari4 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initari4.ORA


set ORACLE_SID=ari5
oradim -new -sid ari5 -SRVC OracleServiceari5 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initari5.ORA


set ORACLE_SID=ari6
oradim -new -sid ari6 -SRVC OracleServiceari6 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initari6.ORA


set ORACLE_SID=ari7
oradim -new -sid ari7 -SRVC OracleServiceari7 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initari7.ORA



sqlplusw sys/oracle@ari0405srv as sysdba
startup pfile='D:\oracle\product\10.2.0\db_1\database\initari2.ora';
create spfile from pfile;
shut immediate
startup
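After the final startup, it is worth confirming the instance really came up on the spfile (an empty value would mean it is still running on the pfile):

SQL> show parameter spfile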

sqlplusw ldbo/ldbo@ari0405srv as sysdba




sqlplusw sys/oracle@ari0506srv as sysdba
startup pfile='D:\oracle\product\10.2.0\db_1\database\initari3.ora';
create spfile from pfile;
shut immediate
startup

sqlplusw ldbo/ldbo@ari0506srv as sysdba



sqlplusw sys/oracle@ari0607srv as sysdba
startup pfile='D:\oracle\product\10.2.0\db_1\database\initari4.ora';
create spfile from pfile;
shut immediate
startup

sqlplusw ldbo/ldbo@ari0607srv as sysdba



sqlplusw sys/oracle@ari0708srv as sysdba
startup pfile='D:\oracle\product\10.2.0\db_1\database\initari5.ora';
create spfile from pfile;
shut immediate
startup


sqlplusw ldbo/ldbo@ari0708srv as sysdba



sqlplusw sys/oracle@ari0809srv as sysdba
startup pfile='D:\oracle\product\10.2.0\db_1\database\initari6.ora';
create spfile from pfile;
shut immediate
startup


sqlplusw sys/oracle@ari0910srv as sysdba
startup pfile='D:\oracle\product\10.2.0\db_1\database\initari7.ora';
create spfile from pfile;
shut immediate
startup

Tuesday, March 15, 2011

Patch

Apply patch

Before you apply the patchset, you have to check whether the Oracle version is 32-bit or 64-bit, because
the patchsets are different.
For 32 Bit Version, Patch no is p6810189_10204_Win32_patchset
For 64 Bit Version, Patch no is p6810189_10204_MSWIN-x86-64.

1. Shut down the database:
SQL> SHUTDOWN IMMEDIATE
Stop all the Oracle-related services,
e.g. Oracle Listener, Oracle DB Console, Oracle JobScheduler,
Distributed Transaction Coordinator.
Run the patch Setup.exe in the same Oracle Home.
For example: if the existing Oracle is installed in c:\Oracle\product\10.2.0\db_1, then you
have to select the same path when you run the Setup.exe. After successful installation,
start the Listener, DB Console, etc.
Enter the following commands (the first two at the OS prompt, not in SQL*Plus):
C:\> SET ORACLE_SID=sns6
C:\> SQLPLUS /NOLOG
SQL> CONNECT SYS/LINUX@sns1011SRV AS SYSDBA
SQL> STARTUP UPGRADE
SQL> SPOOL patch.log
SQL> @ORACLE_BASE\ORACLE_HOME\rdbms\admin\catupgrd.sql
SQL> SPOOL OFF

@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlu102s.sql -- to display the result of the upgrade

Review the patch.log file for errors and inspect the list of components that is displayed at the end of
the catupgrd.sql script.
This list provides the version and status of each SERVER component in the database.
If necessary, rerun the catupgrd.sql script after correcting any problems.


SQL> SHUTDOWN IMMEDIATE
SQL> STARTUP

Run the utlrp.sql script to recompile all invalid PL/SQL packages now instead of when the packages are
accessed for the first time. This step is optional but recommended.

SQL> @ORACLE_BASE\ORACLE_HOME\rdbms\admin\utlip.sql
SQL> @ORACLE_BASE\ORACLE_HOME\rdbms\admin\utlirp.sql
SQL> @ORACLE_BASE\ORACLE_HOME\rdbms\admin\utlrp.sql




result of upgrade

@D:\oracle\product\10.2.0\db_1\RDBMS\ADMIN\utlu102s.sql

If OWM (Oracle Workspace Manager) is not installed and is required, install it manually.

to install oracle workspace manager

select username from dba_users where username ='WMSYS';

ALTER USER WMSYS IDENTIFIED BY WMSYS ACCOUNT UNLOCK;

@%oracle_home%\RDBMS\ADMIN\owminst.plb

select comp_name,version,status from dba_registry;

Email Notification for LDBO login

CREATE OR REPLACE TRIGGER LDBO_logon_notifications
after logon on database
DECLARE
b VARCHAR2(3) := UTL_TCP.CRLF;
l_subject VARCHAR2(40) := 'Alert - LDBO Login';
l_message VARCHAR2(500);

BEGIN
if (USER in ('LDBO') AND SYS_CONTEXT('USERENV','Module') not IN ('vfp9.exe') AND SYS_CONTEXT( 'USERENV', 'IP_ADDRESS' ) NOT IN ('192.168.0.25','192.168.1.84','192.168.1.199','192.168.1.64','192.168.1.126','192.168.1.127','192.168.1.95','192.168.1.163') ) then

l_message :=
'Date/Time: ' || TO_CHAR( SYSDATE, 'mm/dd/yyyy hh24:mi:ss' ) || b;
l_message :=
l_message || 'OS User: ' || SYS_CONTEXT( 'USERENV', 'OS_USER' ) || b;

l_message :=
l_message || 'Host: ' || SYS_CONTEXT( 'USERENV', 'HOST' ) || b;
l_message :=
l_message || 'Terminal: ' || SYS_CONTEXT( 'USERENV', 'TERMINAL' ) || b;

l_message :=
l_message || 'IP Address: ' || SYS_CONTEXT( 'USERENV', 'IP_ADDRESS' ) || b;
l_message :=
l_message || 'Database Name: ' || ora_database_name || b;
l_message :=
l_message || 'Application Name: ' || SYS_CONTEXT('USERENV','Module') || b;

BEGIN
utl_mail.send
( sender => 'kshitij.rakesh@arihantcapital.com',
recipients => 'kshitij.rakesh@arihantcapital.com',
subject => l_subject,
message => l_message );

EXCEPTION
WHEN others THEN
RAISE;

END;
END IF;
END LDBO_logon_notifications;
/
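Note: utl_mail is not installed by default on 10g. A sketch of the one-time setup this trigger assumes (run as SYS; the SMTP host below is a placeholder):

@?\rdbms\admin\utlmail.sql
@?\rdbms\admin\prvtmail.plb
ALTER SYSTEM SET smtp_out_server = 'mail.example.com:25' SCOPE=BOTH;  -- placeholder host
GRANT EXECUTE ON utl_mail TO ldbo;  -- or whichever schema owns the trigger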

Monday, March 14, 2011

Oracle Database Incarnation, Open Resetlogs , SCN

Q. What happens when you run ALTER DATABASE OPEN RESETLOGS ?
The current online redo logs are archived, the log sequence number is reset to 1, a new database incarnation is created, and the online redo logs are given a new time stamp and SCN.
.

Q. In what scenarios open resetlogs required ?

An ALTER DATABASE OPEN RESETLOGS statement is required after incomplete recovery (Point in Time Recovery) or recovery with a backup control file.
.

Q. What is SCN (System Change Number) ?

The system change number (SCN) is an ever-increasing value that uniquely identifies a committed version of the database at a point in time. Every time a user commits a transaction, Oracle records a new SCN in the redo logs.

Oracle uses SCNs in control files, datafile headers, and redo records. Every redo log file has both a log sequence number and a low and high SCN. The low SCN records the lowest SCN recorded in the log file, while the high SCN records the highest SCN in the log file.
.
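The current SCN can also be read directly; either form works on 10g:

SQL> SELECT current_scn FROM v$database;
SQL> SELECT dbms_flashback.get_system_change_number FROM dual;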

Q. What is Database Incarnation ?

A database incarnation is effectively a new "version" of the database that is created when you reset the online redo logs using "alter database open resetlogs;".

Database incarnations fall into the following categories: Current, Parent, Ancestor, and Sibling.

i) Current Incarnation : The database incarnation in which the database is currently generating redo.

ii) Parent Incarnation : The database incarnation from which the current incarnation branched following an OPEN RESETLOGS operation.

iii) Ancestor Incarnation : The parent of the parent incarnation is an ancestor incarnation. Any parent of an ancestor incarnation is also an ancestor incarnation.

iv) Sibling Incarnation : Two incarnations that share a common ancestor are sibling incarnations if neither one is an ancestor of the other.
.

Q. How to view incarnation history of Database ?

Using SQL> select * from v$database_incarnation;
Using RMAN>LIST INCARNATION;

However, you can use the RESET DATABASE TO INCARNATION command to specify that SCNs are to be interpreted in the frame of reference of another incarnation.

•For example, if my current database incarnation is 3 and I run FLASHBACK DATABASE TO SCN 3000; then SCN 3000 will be searched for in the current incarnation, which is 3. However, if I want to get back to SCN 3000 of incarnation 2, then I have to use:

RMAN> RESET DATABASE TO INCARNATION 2;
RMAN> RECOVER DATABASE TO SCN 3000;




Multiplexing of Redo logs

SELECT group#, members, status, bytes FROM v$log;
SELECT group#, member, type FROM v$logfile;

ALTER DATABASE ADD LOGFILE MEMBER 'C:\REDO1011\REDO1.LOG' TO GROUP 1;
ALTER DATABASE ADD LOGFILE MEMBER 'C:\REDO1011\REDO2.LOG' TO GROUP 2;
ALTER DATABASE ADD LOGFILE MEMBER 'C:\REDO1011\REDO3.LOG' TO GROUP 3;


SELECT group#, members, status, bytes FROM v$log;
SELECT group#, member, type FROM v$logfile;

Sunday, March 13, 2011

RMAN Configuration Channel Allocation

Automatic channel allocation:
CONFIGURE PARALLELISM FOR AUTOMATIC CHANNELS:
RMAN> CONFIGURE DEVICE TYPE DISK PARALLELISM 4;
CONFIGURE AUTOMATIC CHANNEL OPTIONS:
RMAN> CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT='F:\BACKUP\%U';
CONFIGURING BACKUPPIECE SIZE:
RMAN> CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT ='F:\BACKUP\%U' MAXPIECESIZE
500M;
RMAN> CONFIGURE CHANNEL DEVICE TYPE DISK CLEAR;
CHANGE THE DEFAULT DEVICE TYPE:
RMAN> CONFIGURE DEFAULT DEVICE TYPE TO DISK;
CONFIGURATION OF EXCLUDING TABLESPACE FROM DATABASE BACKUP:
RMAN> CONFIGURE EXCLUDE FOR TABLESPACE <tablespace_name>;
NOTE: when the following command is issued, it will skip the tablespace which is specified in the above
command. SYSTEM tablespace can not be excluded from whole database backup.
RMAN> BACKUP DATABASE;
NOTE: To overwrite this one,
RMAN> BACKUP DATABASE NOEXCLUDE;
EXCLUDING READ ONLY AND OFFLINE TABLESPACE:
RMAN> BACKUP DATABASE SKIP READONLY SKIP OFFLINE;
TO LIST THE CURRENT CONFIGURATION OF THE TARGET DATABASE:
RMAN> REPORT SCHEMA;
RETENTION POLICY:
There are two options for implementing a backup retention policy.
RMAN> CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 7 DAYS;
RMAN> CONFIGURE RETENTION POLICY TO REDUNDANCY 2;
NOTE: Default is redundancy 1.
RMAN> CONFIGURE RETENTION POLICY CLEAR;
Clear means it will take the default setting (i.e. Redundancy 1)
RMAN> CONFIGURE RETENTION POLICY TO NONE;
RMAN> REPORT OBSOLETE;
RMAN> DELETE OBSOLETE;
RMAN> SHOW RETENTION POLICY;
BACKUP OPTIMIZATION:
RMAN> SHOW BACKUP OPTIMIZATION;
the BACKUP command skips the backup of a file when an identical file backup already exists
RMAN> CONFIGURE BACKUP OPTIMIZATION ON / OFF;
CONFIGURING DUPLEXED BACKUP SET:
RMAN> CONFIGURE DATAFILE BACKUP COPIES FOR DEVICE TYPE DISK TO 2;
SHOW COMMAND:
RMAN> SHOW CHANNEL;
RMAN> SHOW DEFAULT DEVICE TYPE;
RMAN> SHOW DEFAULT DEVICE TYPE;
RMAN> SHOW BACKUP OPTIMIZATION;
RMAN> SHOW RETENTION POLICY;
RMAN> SHOW MAXSETSIZE;
RMAN> SHOW ALL;
LIST COMMAND:
RMAN> LIST BACKUP OF DATABASE;
RMAN> LIST BACKUP OF TABLESPACE USERS;
RMAN> LIST BACKUP OF DATAFILE 1;
RMAN> LIST COPY;
RMAN> LIST COPY OF TABLESPACE USERS;
RMAN> LIST COPY OF DATAFILE 2;
REPORT COMMAND:
RMAN> REPORT SCHEMA;
RMAN> REPORT OBSOLETE;
RMAN> REPORT NEED BACKUP DAYS 5;
RMAN> REPORT NEED BACKUP REDUNDANCY 10;
RMAN> REPORT NEED BACKUP INCREMENTAL 3 DATABASE;
Manual channel allocation:
RMAN>RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='E:\DATAFILE3.BAK';
BACKUP DATAFILE 3;
}
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='C:\BACKUP\%U','E:\BACKUP\%U','F:\BACKUP\%U';
BACKUP COPIES 3 DATAFILE 9;
}
Creating different backup pieces in different locations:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK FORMAT='F:\BACKUP\%U';
ALLOCATE CHANNEL C2 TYPE DISK FORMAT='F:\BACKUP\%U';
ALLOCATE CHANNEL C3 TYPE DISK FORMAT='F:\BACKUP\%U';
BACKUP DATAFILE 8;
}
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='F:\BACKUP\DATAFILE8_%c.BAK';
BACKUP COPIES 3 DATAFILE 8;
}
NOTE: In the FORMAT clause, %c is mandatory
RMAN>RUN {
BACKUP AS BACKUPSET
FORMAT '/u01/db01/backup/%d_%s_%p'
DURATION 10:00 MINIMIZE LOAD
(DATABASE);
SQL 'alter system archive log current';
}
RMAN BACKUP:
RMAN> BACKUP AS BACKUPSET
FORMAT '/BACKUP/df_%d_%s_%p.bus'
TABLESPACE hr_data;
DATABASE BACKUP:
RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='F:\BACKUP\%U';
BACKUP DATABASE;
}
TABLESPACE BACKUP:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='F:\BACKUP\%U';
BACKUP TABLESPACE USERS,UNDOTBS1;
}
DATAFILE BACKUP:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='F:\BACKUP\%U';
BACKUP DATAFILE 7,8,9;
}
ARCHIVED REDOLOG BACKUP:
RMAN> BACKUP DATABASE PLUS ARCHIVELOG;
RMAN> BACKUP TABLESPACE USERS PLUS ARCHIVELOG;
RMAN> BACKUP DATAFILE 7 PLUS ARCHIVELOG;
RMAN>BACKUP
FORMAT '/disk1/backup/ar_%t_%s_%p'
ARCHIVELOG FROM SEQUENCE=234
DELETE INPUT;
Multiplexed Backup Sets
RMAN>RUN {
ALLOCATE CHANNEL c1 DEVICE TYPE disk;
ALLOCATE CHANNEL c2 DEVICE TYPE disk;
ALLOCATE CHANNEL c3 DEVICE TYPE disk;
BACKUP
INCREMENTAL LEVEL = 0
FORMAT '%u.bak'
(DATAFILE 1 CHANNEL c1)
(DATAFILE 2 CHANNEL c2) (DATAFILE 3 CHANNEL c3);
Sql 'ALTER SYSTEM ARCHIVE LOG CURRENT';
}
RMAN>RUN {
ALLOCATE CHANNEL c1 type disk;
COPY INCREMENTAL LEVEL = 0
DATAFILE 1 tag=DF3,
ARCHIVELOG all;
}
RMAN>BACKUP COPY OF DATABASE;
SQL> ALTER DATABASE ENABLE
BLOCK CHANGE TRACKING
USING FILE '/mydir/rman_change_track.f'
REUSE;
RMAN>RECOVER COPY OF
DATAFILE {n|'file_name'}
WITH TAG 'incr_upd_df1';
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK MAXOPENFILES=3;
BACKUP FILESPERSET 6 FORMAT 'F:\BACKUP\%U' DATABASE;
}
DUPLEXED BACKUP SET:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='C:\BACKUP\%U','C:\NEW\%U';
BACKUP COPIES 2 DATAFILE 2;
}
PARALLELIZATION OF BACKUP SETS:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK;
ALLOCATE CHANNEL C2 TYPE DISK;
ALLOCATE CHANNEL C3 TYPE DISK;
BACKUP
FORMAT 'C:\BACKUP\%U'
(DATAFILE 1,2,3 CHANNEL C1)
(DATAFILE 4,5,6 CHANNEL C2)
(DATAFILE 7,8,9 CHANNEL C3);
}
BACKUP PIECE SIZE:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK MAXPIECESIZE=200M;
BACKUP FORMAT 'C:\BACKUP\%U' (TABLESPACE SYSTEM);
}
ARCHIVED REDOLOG BACKUP SETS:
RMAN> BACKUP
FORMAT 'C:\BACKUP\%U'
ARCHIVELOG ALL;
RMAN> BACKUP
FORMAT 'C:\BACKUP\%U'
ARCHIVELOG ALL DELETE ALL INPUT;
CONTROLFILE SPFILE BACKUP:
RMAN> CONFIGURE CONTROLFILE AUTOBACKUP ON;
RMAN> BACKUP DATAFILE 5 INCLUDE CURRENT CONTROLFILE;
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='C:\BACKUP\SPCONTROL.BAK';
BACKUP CURRENT CONTROLFILE;
}
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='C:\BACKUP\SPCONTROL.BAK';
BACKUP SPFILE;
}
TAGS FOR BACKUP AND IMAGE COPY:
RMAN> RUN
{
ALLOCATE CHANNEL C1 TYPE DISK
FORMAT='C:\BACKUP\DF7.BAK';
BACKUP DATAFILE 7 TAG='DF7';
}
RMAN> RUN
{
COPY
DATAFILE 6 TO 'C:\BACKUP\DF6.BAK' TAG='DF6';
}

SQL format

column <column_name> format a35

set lines 9999
set pages 9999
set colsep "|"



------

Wednesday, March 9, 2011

Why 10.2.0.4 patch set?

bugs fixed in this patch set:

Higher CPU / Higher "cache buffer chains" latch gets / Higher "consistent gets" after truncate/Rebuild.

SYSTEM RESET of an spfile parameter can corrupt the SPFILE
/*append parallel*/ can corrupt an index

deadlock hang on "cursor: pin S wait on X" (typically from DBMS_STATS)
Wrong results with bind variables/CURSOR_SHARING

5079978 High US enqueue contention in RAC
5387030 Automatic tuning of undo_retention causes unusual extra space allocation
5439554 "buffer busy wait" timeouts with automatic undo management and in memory undo
5442919 Expired extents not being reused (ORA-30036)
5512921 Instance crash caused by SMON OERI[kcblus_1] / dump

----------------------

Sunday, March 6, 2011

Incremental Backup

RMAN Incremental Backup

Level 0 (same as a full backup, but recorded so it can be used as the basis for level 1 incremental backups)
backup incremental level 0 as compressed backupset database FORMAT 'D:\archive1011\%U';

  • A differential backup, which backs up all blocks changed after the most recent incremental backup at level 1 or 0

  • A cumulative backup, which backs up all blocks changed after the most recent incremental backup at level 0



backup incremental level 1 as compressed backupset database FORMAT 'D:\archive1011\%U';
backup incremental level 1 as compressed backupset database FORMAT 'D:\archive1011\%U';
backup incremental level 1 as compressed backupset database FORMAT 'D:\archive1011\%U';
backup incremental level 1 as compressed backupset database FORMAT 'D:\archive1011\%U';

Cumulative incremental backups reduce the work needed for a restore by ensuring that you only need one incremental backup from any particular level. Cumulative backups require more space and time than differential backups, however, because they duplicate the work done by previous backups at the same level.

cumulative incremental backup (backup after level 0)
backup cumulative incremental level 1 as compressed backupset database FORMAT 'D:\archive1011\%U';
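Nothing extra is needed at restore time for incrementals: RECOVER applies the level 0, then the level 1s, then archived redo automatically. A minimal sketch:

RMAN> SHUTDOWN IMMEDIATE;
RMAN> STARTUP MOUNT;
RMAN> RESTORE DATABASE;
RMAN> RECOVER DATABASE;
RMAN> ALTER DATABASE OPEN;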

Wednesday, February 23, 2011

Wait Events

http://www.scribd.com/doc/3321687/09-enqueues

SQL*Net message from client

The server process (foreground process) waits for a message from the client process to arrive.




db file scattered read

The db file scattered Oracle metric event signifies that the user process is reading buffers into the SGA buffer cache and is waiting for a physical I/O call to return.

A db file scattered read issues a scatter-read to read the data into multiple discontinuous memory locations. A scattered read is usually a multiblock read. It can occur for a fast full scan (of an index) in addition to a full table scan.

* db file sequential read—A single-block read (i.e., index fetch by ROWID)

* db file scattered read—A multiblock read (a full-table scan, OPQ, sorting)


read by other session
read by other session occurs when two users need access to the same block of data. The first user reads the data from disk and places it in the buffer cache. The second user has to wait for the first user's operation to complete, so they are placed into a wait; this is when the read by other session wait occurs. Unfortunately, this is one of those events we need to "catch in the act" to properly resolve.
http://www.rampant-books.com/art_read_by_other_session.htm



log file sync
When a user session commits, the session's redo information needs to be flushed to the redo logfile. The user session will post the LGWR to write the log buffer to the redo log file. When the LGWR has finished writing, it will post the user session.

log file parallel write
Writing redo records to the redo log files from the log buffer.


db file parallel write
The db file parallel write Oracle metric occurs when the process, typically DBWR, has issued multiple I/O requests in parallel to write dirty blocks from the buffer cache to disk, and is waiting for all requests to complete.




PX Deq Credit: send blkd



direct path read
direct path read waits only when you are doing a parallel full-scan.



enq: RO - fast object reuse



Buffer Busy Waits
A buffer busy wait occurs if multiple processes want to access a buffer in the buffer cache concurrently.
The main way to reduce buffer busy waits is to reduce the total I/O on the system. This can be done by tuning the SQL to access rows with fewer block reads (i.e., by adding indexes). Even if we have a huge db_cache_size, we may still see buffer busy waits, and increasing the buffer size won't help.


The most common remedies for high buffer busy waits include database writer (DBWR) contention tuning, adding freelists to a table and index, implementing Automatic Segment Storage Management (ASSM, a.k.a. bitmap freelists), and, of course, adding a missing index to reduce buffer touches.
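To see which block class the buffer busy waits are actually on (data block, undo header, segment header, etc.), v$waitstat gives the breakdown:

SELECT class, count, time
FROM v$waitstat
ORDER BY count DESC;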






rdbms ipc message

The background processes (LGWR, DBWR, LMS0) use this event to indicate that they are idle and are waiting for the foreground processes to send them an IPC message to do some work.




Streams AQ: waiting for messages in the queue

The session is waiting on an empty OLTP queue (Advanced Queuing) for a message to arrive so that the session can dequeue that message.





library cache lock
Oracle's library cache is nothing more than an area in memory, specifically one of three parts inside the shared pool. The library cache is composed of shared SQL areas, PL/SQL packages and procedures, various locks & handles, and, in the case of a shared server configuration, private SQL areas. Whenever an application wants to execute SQL or PL/SQL (collectively called code), that code must first reside inside Oracle's library cache. When applications run and reference code, Oracle will first search the library cache to see if that code already exists in memory.


Situation 1: library cache lock/pin waits happen when an object is pinned in memory (being executed or compiled) and another session wants to use it (for compilation, a grant, ...).
Situation 2: the first session runs a long DML operation and a second session then tries DDL on the same object (e.g. ALTER TABLE).


Time Model Statistics

The goal of a DBA would be to reduce the DB time number to be as low as possible for any given time period. Obviously DBAs constantly try to reduce this number by eliminating wait events, but now we have a bit more incentive to reduce DB time by tuning SQL, applications, architecture, database design, instance layout, etc., realizing that if we can produce a result set faster, then DB time will also be reduced.
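DB time and its components are exposed in v$sys_time_model (values are in microseconds); the biggest consumers show where to tune:

SELECT stat_name, ROUND(value/1000000, 2) AS seconds
FROM v$sys_time_model
ORDER BY value DESC;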

AWR Sections

AWR report is broken into multiple parts.

1)Instance information:-
This provides the instance name and number, snapshot IDs, the total time the report covers, and the database time during this elapsed time.

Elapsed time = end snapshot time - start snapshot time
Database time = work done by the database during this elapsed time (both CPU and I/O add to database time). If this is smaller than the elapsed time by a great margin, then the database is idle. Database time does not include time spent by the background processes.

2)Cache Sizes : This shows the size of each SGA region after AMM has changed them. This information
can be compared to the original init.ora parameters at the end of the AWR report.

3)Load Profile: This important section shows key rates expressed in units of per second and
per transaction. This is very important for understanding how the instance is behaving. It has to be compared to a baseline report to understand the expected load on the machine and the delta during bad times.

4)Instance Efficiency Percentages (Target 100%): This section shows how close the vital ratios are, like buffer cache hit, library cache hit, parses, etc. These can be taken as indicators, but should not be a cause of worry if they are low, as the ratios could be low or high based on database activity and not due to a real performance problem. Hence these are not stand-alone statistics and should be read for a high-level view.

5)Shared Pool Statistics: This summarizes changes to the shared pool during the snapshot
period.

6)Top 5 Timed Events: This is the section most relevant for analysis. It shows what % of database time each wait event accounted for. Until 9i, this was the way to backtrack to the total database time for the report, as there was no database time column in 9i.

7)RAC Statistics: This part is seen only in case of a cluster instance. It provides important indications of the average times taken for block transfer, block receiving, messages, etc., which can point to performance problems in the cluster instead of the database.

8)Wait Class: This depicts which wait class was the area of contention and where we need to focus: was it network, concurrency, cluster, I/O, application, configuration, etc.

9)Wait Events Statistics Section: This section shows a breakdown of the main wait events in the
database including foreground and background database wait events as well as time model, operating
system, service, and wait classes statistics.

10)Wait Events: This AWR report section provides more detailed wait event information for foreground
user processes which includes Top 5 wait events and many other wait events that occurred during
the snapshot interval.

11)Background Wait Events: This section is relevant to the background process wait events.

12)Time Model Statistics: Time model statistics report how database processing time is spent. This
section contains detailed timing information on particular components participating in database
processing. It also gives timing information about background processes, which is not included in database time.

13)Operating System Statistics: This section is important from the OS/server contention point of view. It shows the main external resources including I/O, CPU, memory, and network usage.

14)Service Statistics: The service statistics section gives information about services and their load in terms of CPU seconds, I/O seconds, number of buffer reads, etc.

15)SQL Section: This section displays top SQL, ordered by important SQL execution metrics.

a)SQL Ordered by Elapsed Time: Includes SQL statements that took significant execution
time during processing.

b)SQL Ordered by CPU Time: Includes SQL statements that consumed significant CPU time
during its processing.

c)SQL Ordered by Gets: These SQLs performed a high number of logical reads while
retrieving data.

d)SQL Ordered by Reads: These SQLs performed a high number of physical disk reads while
retrieving data.

e)SQL Ordered by Parse Calls: These SQLs experienced a high number of reparsing operations.

f)SQL Ordered by Sharable Memory: Includes SQL statements cursors which consumed a large
amount of SGA shared pool memory.

g)SQL Ordered by Version Count: These SQLs have a large number of versions in shared pool
for some reason.

16)Instance Activity Stats: This section contains statistical information describing how the database
operated during the snapshot period.

17)I/O Section: This section shows the all-important I/O activity. It provides the time it took to make one I/O, say Av Rd(ms), and I/Os per second, say Av Rd/s. This should be compared to the baseline to see if the rate of I/O has always been like this or there is a divergence now.

18)Advisory Section: This section show details of the advisories for the buffer, shared pool, PGA and
Java pool.

19)Buffer Wait Statistics: This important section shows buffer cache waits statistics.

20)Enqueue Activity: This important section shows how enqueue operates in the database. Enqueues are
special internal structures which provide concurrent access to various database resources.

21)Undo Segment Summary: This section gives a summary about how undo segments are used by the database.
Undo Segment Stats: This section shows detailed history information about undo segment activity.

22)Latch Activity: This section shows details about latch statistics. Latches are a lightweight
serialization mechanism that is used to single-thread access to internal Oracle structures. A latch should be checked by its sleeps: the sleepiest latch is the latch under contention, not the latch with the most requests. Hence run through the sleep breakdown part of this section to arrive at the latch under highest contention.

23)Segment Section: This portion is important for making a guess at which segment, and which segment type, the contention could be in. Tally this with the top 5 wait events.

Segments by Logical Reads: Includes top segments which experienced high number of
logical reads.

Segments by Physical Reads: Includes top segments which experienced high number of disk
physical reads.

Segments by Buffer Busy Waits: These segments have the largest number of buffer waits
caused by their data blocks.

Segments by Row Lock Waits: Includes segments that had a large number of row locks on
their data.

Segments by ITL Waits: Includes segments that had a large contention for Interested
Transaction List (ITL). The contention for ITL can be reduced by increasing INITRANS storage
parameter of the table.

24)Dictionary Cache Stats: This section exposes details about how the data dictionary cache is
operating.

25)Library Cache Activity: Includes library cache statistics, which are needed in case you see library cache in the top 5 wait events. You might want to see if reloads/invalidations are causing the contention or there is some other issue with the library cache.

26)SGA Memory Summary: This tells us the difference in the respective pools at the start and end of the report. This could be an indicator for setting a minimum value for each, when sga_target is being used.

27)init.ora Parameters: This section shows the original init.ora parameters for the instance during
the snapshot period.
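For reference, the report itself is generated with the script shipped in ORACLE_HOME; it prompts for report type, the snapshot range, and the output file name:

SQL> @?/rdbms/admin/awrrpt.sql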

Tuesday, February 22, 2011

delete listener services in windows

how to delete listener services in windows

Run regedt32, navigate to HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services, locate the Oracle listener service key, and remove it with the Delete key. A command-line alternative is sketched below.
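If you prefer the command line, the built-in sc utility can remove the service as well (the service name below is only an example; list the installed names first):

C:\> sc query state= all | findstr /i "Oracle"
C:\> sc delete OracleOraDb10g_home1TNSListener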

Sunday, February 20, 2011

RMAN :Restore Different Server, Database folder on different Directory, Backup Piece on different location

Scenario
Restore on Different Server, Database folders are on different Directory, Backup Piece on different location
Previous Server Prod, E:\snsd1011\, E:\archive1011\
New Server UAT D:\snsd1011\, D:\archive1011\

-----install Oracle server 10.2.0.3 without creating a starter database

------
mkdir D:\oracle\product\10.2.0\admin\sns1011\adump
mkdir D:\oracle\product\10.2.0\admin\sns1011\bdump
mkdir D:\oracle\product\10.2.0\admin\sns1011\cdump
mkdir D:\oracle\product\10.2.0\admin\sns1011\dpdump
mkdir D:\oracle\product\10.2.0\admin\sns1011\pfile
mkdir D:\oracle\product\10.2.0\admin\sns1011\udump
mkdir D:\archive1011\sns1011\arch
copy initsns1011.ora, tnsnames.ora and listener.ora to the destination location and change the parameters accordingly.

------
D:\>
oradim -new -sid sns1011 -SRVC OracleServicesns1011 -intpwd oracle -MAXUSERS 5 -STARTMODE auto -PFILE D:\oracle\product\10.2.0\db_1\database\initsns1011.ORA

----
lsnrctl stop
lsnrctl start
lsnrctl services
tnsping sns1011
----
sqlplusw sys/oracle@sns1011srv as sysdba

SQL>startup nomount pfile='D:\oracle\product\10.2.0\db_1\database\initsns1011.ora';

-----
cmd
c:>
SET ORACLE_SID=sns1011
RMAN TARGET SYS/linux@SNS1011SRV
shutdown immediate;
startup nomount;

RMAN> SET DBID=3554091374;

RMAN> RESTORE CONTROLFILE FROM 'D:\archive1011\SNS1011\C-3554091374-20100603-00';

RMAN> ALTER DATABASE MOUNT;

---------
RMAN>
list backup;
CROSSCHECK backup of database;
delete backup of database;
delete expired backup;
list backup;
delete backupset 146;
CROSSCHECK backup of controlfile;
delete backup of controlfile;
CROSSCHECK archivelog all;
delete force obsolete;
delete expired archivelog all;

---------

RMAN>CATALOG START WITH 'D:\archive1011\sns1011';

or

catalog backuppiece
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_1_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_2_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_3_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_4_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_5_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_6_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_7_T_721740034'
;

---------

RUN{
set newname for datafile 1 TO 'D:\SNSD1011\SYSTEM01.ORA';
set newname for datafile 2 TO 'D:\SNSD1011\UNDOTBS01.ORA';
set newname for datafile 3 TO 'D:\SNSD1011\SYSAUX01.ORA';
set newname for datafile 4 TO 'D:\SNSD1011\INDX01.ORA';
set newname for datafile 5 TO 'D:\SNSD1011\USERS01.ORA';
set newname for tempfile 5 TO 'D:\SNSD1011\TEMP01.ORA';
}
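Note: SET NEWNAME only takes effect when RESTORE and SWITCH run inside the same RUN block; issued on its own as above it changes nothing, which is why the rename-file approach below is needed. A self-contained alternative would be the following sketch (same paths as above):

RUN{
set newname for datafile 1 TO 'D:\SNSD1011\SYSTEM01.ORA';
set newname for datafile 2 TO 'D:\SNSD1011\UNDOTBS01.ORA';
set newname for datafile 3 TO 'D:\SNSD1011\SYSAUX01.ORA';
set newname for datafile 4 TO 'D:\SNSD1011\INDX01.ORA';
set newname for datafile 5 TO 'D:\SNSD1011\USERS01.ORA';
restore database;
switch datafile all;
recover database;
}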

---------
SQL>
alter database rename file 'e:\snsd1011\system01.ora' to 'd:\snsd1011\system01.ora';
alter database rename file 'e:\snsd1011\users01.ora' to 'd:\snsd1011\users01.ora';
alter database rename file 'e:\snsd1011\UNDOTBS01.ora' to 'd:\snsd1011\UNDOTBS01.ora';
alter database rename file 'e:\snsd1011\SYSAUX01.ora' to 'd:\snsd1011\SYSAUX01.ora';
alter database rename file 'e:\snsd1011\INDX01.ora' to 'd:\snsd1011\INDX01.ora';
alter database rename file 'e:\snsd1011\TEMP01.ora' to 'd:\snsd1011\TEMP01.ora';

alter database rename file 'e:\snsd1011\redo01.ora' to 'd:\snsd1011\redo01.ora';
alter database rename file 'e:\snsd1011\redo02.ora' to 'd:\snsd1011\redo02.ora';
alter database rename file 'e:\snsd1011\redo03.ora' to 'd:\snsd1011\redo03.ora';

------------
RESTORE DATABASE;
RECOVER DATABASE;
ALTER DATABASE OPEN RESETLOGS;
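To confirm that every file now points at the D: drive, a quick check after opening (sketch):

SQL> select name from v$datafile;
SQL> select member from v$logfile;
SQL> select name from v$tempfile;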

----------------------------

Restore Error to different server and different directory (RMAN ORA-01180 ORA-01110)


--------Error--------RMAN ORA-01180 ORA-01110----------

I took a database backup (on the E drive) as backup pieces and tried to restore it onto a test server (D drive), but the restore failed.

If the controlfile holds datafile locations on the E drive, the restore onto the D drive fails with the errors below.
If I recreate the controlfile instead, I get a DBID mismatch error.


SET ORACLE_SID=sns1011
RMAN TARGET SYS/linux@SNS1011SRV
shutdown immediate;
startup nomount;

RMAN> SET DBID=3554091374;
RMAN> RESTORE CONTROLFILE FROM 'D:\archive1011\SNS1011\C-3554091374-20100603-00';

alter database MOUNT;

RMAN>
list backup;
CROSSCHECK backup of database;
delete backup of database;
delete expired backup;
list backup;
delete backupset 146;
CROSSCHECK backup of controlfile;
delete backup of controlfile;
CROSSCHECK archivelog all;
delete force obsolete;
delete expired archivelog all;
list backup;
RMAN> delete backuppiece 'E:\BACKUP\RMAN\SNS1011\RMANBACKUP_DB_SNS1011_S_157_P_1_T_721741509';

RMAN-06207: WARNING: 7 objects could not be deleted for DISK channel(s) due to mismatched status.


RMAN>CATALOG START WITH 'D:\archive1011\sns1011';

or

catalog backuppiece
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_1_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_2_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_3_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_4_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_5_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_6_T_721740034',
'D:\archive1011\sns1011\RMANBACKUP_DB_SNS1011_S_156_P_7_T_721740034'
;



RUN{
set newname for datafile 1 TO 'D:\SNSD1011\SYSTEM01.ORA';
set newname for datafile 2 TO 'D:\SNSD1011\UNDOTBS01.ORA';
set newname for datafile 3 TO 'D:\SNSD1011\SYSAUX01.ORA';
set newname for datafile 4 TO 'D:\SNSD1011\INDX01.ORA';
set newname for datafile 5 TO 'D:\SNSD1011\USERS01.ORA';
set newname for tempfile 5 TO 'D:\SNSD1011\TEMP01.ORA';
}

RESTORE DATABASE;



Starting restore at 20-FEB-11
using channel ORA_DISK_1

channel ORA_DISK_1: starting datafile backupset restore
channel ORA_DISK_1: specifying datafile(s) to restore from backup set
restoring datafile 00001 to E:\SNSD1011\SYSTEM01.ORA
restoring datafile 00002 to E:\SNSD1011\UNDOTBS01.ORA
restoring datafile 00003 to E:\SNSD1011\SYSAUX01.ORA
restoring datafile 00004 to E:\SNSD1011\INDX01.ORA
restoring datafile 00005 to E:\SNSD1011\USERS01.ORA
channel ORA_DISK_1: reading from backup piece D:\ARCHIVE1011\SNS1011\RMANBACKUP_
DB_SNS1011_S_156_P_1_T_721740034
ORA-19870: error reading backup piece D:\ARCHIVE1011\SNS1011\RMANBACKUP_DB_SNS10
11_S_156_P_1_T_721740034
ORA-19504: failed to create file "E:\SNSD1011\USERS01.ORA"
ORA-27040: file create error, unable to create file
OSD-04002: unable to open file
O/S-Error: (OS 21) The device is not ready.
failover to previous backup

creating datafile fno=1 name=E:\SNSD1011\SYSTEM01.ORA
RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-03002: failure of restore command at 02/20/2011 20:48:10
ORA-01180: can not create datafile 1
ORA-01110: data file 1: 'E:\SNSD1011\SYSTEM01.ORA'


----------------Solution-----------------------

SQL>
alter database rename file 'e:\snsd1011\system01.ora' to 'd:\snsd1011\system01.ora';
alter database rename file 'e:\snsd1011\users01.ora' to 'd:\snsd1011\users01.ora';
alter database rename file 'e:\snsd1011\UNDOTBS01.ora' to 'd:\snsd1011\UNDOTBS01.ora';
alter database rename file 'e:\snsd1011\SYSAUX01.ora' to 'd:\snsd1011\SYSAUX01.ora';
alter database rename file 'e:\snsd1011\INDX01.ora' to 'd:\snsd1011\INDX01.ora';
alter database rename file 'e:\snsd1011\TEMP01.ora' to 'd:\snsd1011\TEMP01.ora';

alter database rename file 'e:\snsd1011\redo01.ora' to 'd:\snsd1011\redo01.ora';
alter database rename file 'e:\snsd1011\redo02.ora' to 'd:\snsd1011\redo02.ora';
alter database rename file 'e:\snsd1011\redo03.ora' to 'd:\snsd1011\redo03.ora';

----------------------

Wednesday, February 16, 2011

Data Pump

USING DATA PUMP
TABLE EXPORT IMPORT
expdp scott/tiger@db10g tables=EMP,DEPT directory=TEST_DIR dumpfile=EMP_DEPT.dmp logfile=expdpEMP_DEPT.log

impdp scott/tiger@db10g tables=EMP,DEPT directory=TEST_DIR dumpfile=EMP_DEPT.dmp logfile=impdpEMP_DEPT.log



1. The following SQL statements create a user, create a directory object named dpump_dir1, and grant the required permissions to the user.

SQLPLUS ldbo/linux@sns0809srv as sysdba
SQL> create user dpuser identified by dpuser;
SQL> grant connect, resource to dpuser;
SQL> CREATE DIRECTORY dpump_dir1 AS 'e:\dpdirectory';
SQL> grant read, write on directory dpump_dir1 to dpuser;


$ expdp dpuser/dpuser@sns0809srv schemas=dpuser directory=dpump_dir1 dumpfile=dpuser.dmp logfile=dpuser.log
$expdp dpuser/dpuser@sns0809srv schemas=dpuser directory=dpump_dir1 dumpfile=dpuser2.dmp logfile=dpuser.log
$expdp dpuser/dpuser@TDB10G schemas=dpuser directory=dpump_dir1 parallel=4 dumpfile=dpuser_%U.dmp logfile=dpuser.log



---------------difference between traditional exp/imp and data pump-------

The main differences are listed below:
1)Expdp/Impdp access files on the server rather than on the client.

2)Expdp/Impdp operate on a group of files called a dump file set rather than on a single sequential dump file.

3)To improve performance, Impdp/Expdp use parallel execution rather than a single stream of execution.

Data Pump also adds the following capabilities:
1) Ability to estimate job times
2) Ability to restart failed jobs (see the attach/restart sketch below)
3) Perform fine-grained object selection
4) Monitor running jobs
5) Directly load a database from a remote instance via the network
6) Remapping capabilities
7) Improved performance using parallel executions
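To illustrate item 2, a stopped or failed job can be re-attached and restarted from the interactive prompt (a minimal sketch; the job name SYS_EXPORT_SCHEMA_01 is the typical default for a schema export and may differ on your system):

$ expdp dpuser/dpuser@sns0809srv attach=SYS_EXPORT_SCHEMA_01
Export> status
Export> start_job
Export> continue_client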

-------

DBMS_DATAPUMP package


SQLPLUS system/manager@TDB10G as sysdba



SQL> create user dpuser identified by dpuser;
grant connect, resource to dpuser;
CREATE DIRECTORY dpump_dir1 AS 'E:\app\kshitij';
grant read, write on directory dpump_dir1 to dpuser;

========
$ expdp dpuser/dpuser@orcl schemas=dpuser include=TABLE:\"IN (\'EMP\', \'DEPT\')\" directory=dpump_dir1 dumpfile=dpuser.dmp logfile=dpuser.log

$expdp dpuser/dpuser@TDB10G schemas=dpuser exclude=TABLE:\"= \'EMP_DETAILS\'\" directory=dpump_dir1 dumpfile=dpuser2.dmp logfile=dpuser.log
==============
The following steps list the basic activities involved in using the Data Pump API.

1. Execute the DBMS_DATAPUMP.OPEN procedure to create a job.

2. Define parameters for the job, such as adding files and filters.

3. Start the job.

4. Optionally, monitor the job until it completes.

5. Optionally, detach from the job and attach again at a later time.

6. Optionally, stop the job.

7. Restart a job that was stopped.

Example of the above steps:



Declare
   p_handle NUMBER;                  -- Data Pump job handle
   p_last_job_state VARCHAR2(45);    -- To keep track of job state
   p_job_state VARCHAR2(45);
   p_status ku$_Status;              -- The status object returned by get_status
BEGIN
   p_handle := DBMS_DATAPUMP.OPEN ('EXPORT','SCHEMA',NULL,'EXAMPLE','LATEST');

   -- Specify a single dump file for the job (using the handle just returned)
   -- and a directory object, which must already be defined and accessible
   -- to the user running this procedure.
   DBMS_DATAPUMP.ADD_FILE (p_handle,'example.dmp','DMPDIR');

   -- A metadata filter is used to specify the schema that will be exported.
   DBMS_DATAPUMP.METADATA_FILTER (p_handle,'SCHEMA_EXPR','IN (''dpuser'')');

   -- Start the job. An exception will be generated if something is not set up
   -- properly.
   DBMS_DATAPUMP.START_JOB (p_handle);
END;
/

The export job should now be running.

The status of the job can be checked by writing a separate procedure that captures the errors and status until the job completes. Overall job status can also be obtained by querying dba_datapump_jobs, as below.
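A slightly more readable version of that query (columns as defined in the 10g data dictionary):

select owner_name, job_name, operation, job_mode, state, degree
  from dba_datapump_jobs;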



=======================

Data Pump ( expdp / impdp )

Now in 10g we have total control over a running job (stop it, pause it, check it, restart it). Data Pump is a server-side technology and it can transfer large amounts of data very quickly, using parallel streams to achieve maximum throughput; Data Pump jobs can be 15-45% faster than the older import/export utilities.

Advantages using data pump

1) Ability to estimate job times
2) Ability to restart failed jobs
3) Perform fine-grained object selection
4) Monitor running jobs
5) Directly load a database from a remote instance via the network
6) Remapping capabilities
7) Improved performance using parallel executions
Note (1): You cannot export to a tape device, only to disk, and the import only works with Oracle version 10.1 or greater.

Note (2): expdp and impdp are command-line tools and run from within the operating system.

Use of data pump

1) Migrating databases
2) Copying databases
3) Transferring Oracle databases between different operating systems
4) Backing up important tables before you change them
5) Moving database objects from one tablespace to another
6) Transporting tablespaces between databases
7) Reorganizing fragmented table data
8) Extracting the DDL for tables and other objects such as stored procedures and packages

Data Pump components

1) dbms_datapump - the main engine for driving data dictionary metadata loading and unloading
2) dbms_metadata - used to extract the appropriate metadata
3) command-line - expdp and impdp are the import/export equivalents

Data Access methods

1) Direct path

2) External table path

Direct Path
Bypasses the database buffer cache and writes beyond the high water mark; when finished it adjusts the high water mark. No undo is generated, and redo can be switched off as well. It has minimal impact on users since it does not use the SGA. Triggers on the tables must be disabled before use.

External Table Path
Uses the database buffer cache; export acts as a SELECT statement into a dump file, and during import the rows are reconstructed into INSERT statements, so the whole process is like a normal SELECT/INSERT job. Both undo and redo are generated, and a normal COMMIT is used just as a DML statement would.

Oracle will use the external table path if any of the following are in use:
1) clustered tables
2) active triggers on the table
3) a single partition in a table with a global index
4) referential integrity constraints
5) domain indexes on LOB columns
6) tables with fine-grained access control enabled in insert mode
7) tables with BFILE or opaque type columns

Data Pump files

All files will be created on the server.

1) Dump files - hold the data and metadata
2) Log files - the resulting output from the Data Pump command
3) SQL files - contain the DDL statements describing the objects included in the job, but do not contain data (see the SQLFILE sketch below)

Master data pump table - when using Data Pump, a table is created within the schema running the job; it is used for controlling the Data Pump job and is removed when the job finishes.
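To illustrate item 3, impdp can spool the DDL from a dump file into a SQL file without importing anything (a sketch; the file names are illustrative):

impdp dpuser/dpuser directory=dpump_dir1 dumpfile=dpuser.dmp sqlfile=ddl_only.sql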

Data Pump privileges

1) exp_full_database
2) imp_full_database


How Data Pump works


The Master Control Process (MCP) has the process name DMnn. Only one master process runs per job; it controls the whole Data Pump job and performs the following:
creates jobs and controls them
creates and manages the worker processes
monitors the jobs and logs the progress
maintains the job state and restart information in the master table (created in the schema of the user running the job)
manages the necessary files, including the dump file set

The master process creates a master table which contains the job details (state, restart info); this table is created in the schema of the user running the Data Pump job. Once the job has finished it dumps the table contents into the dump file and drops the table. When you import the dump file it re-creates the table and reads it to work out the correct sequence in which it should import the various database objects.
The worker process is named DWnn and is the process that actually performs the work; you can have a number of worker processes running on the same job (parallelism). The worker processes update the master table with the various job statuses.
The shadow process is created when the client logs in to the Oracle server; it services Data Pump API requests and creates the job, consisting of the master table and the master process.
The client processes are the expdp and impdp commands.

Examples

Exporting database
expdp vallep/password directory=datapump full=y dumpfile=data.dmp filesize=2G parallel=2 logfile=full.log
Note: increase the parallel option based on the number of CPUs you have

Exporting schema
expdp sys/password schemas=testuser dumpfile=data.dmp logfile=schema.log

Exporting tables
expdp vallep/password tables=accounts,employees dumpfile=data.dmp content=metadata_only

Exporting tablespace
expdp vallep/password tablespaces=users dumpfile=data.dmp logfile=tablespace.log

Importing database
impdp system/password full=y dumpfile=data.dmp nologfile=y

Importing schema change
impdp system/password schemas='HR' remap_schema='HR:HR_TEST' content=data_only

impdp system/passwd remap_schema='TEST:TEST3' tables=test logfile=… dumpfile=… directory=…

Other Options

directory
specifies an Oracle directory object

filesize
split the dump file into specific sizes (could be used if the filesystem has a 2GB limit)

parfile
specify the parameter file

content
contents option can be ALL, METADATA_ONLY or DATA_ONLY

compression
compression of metadata is used by default but you can turn it off

exclude/include
metadata filtering

query
selectively export table data using a SQL statement

estimate
Calculate job estimates, where the valid keywords are blocks and statistics

estimate_only
Calculate job estimates without performing the export

network_link
you can perform an export across a network, via a database link

encryption
you can encrypt data within the data pump file

parallel
increase worker processes to increase throughput; base it on the number of CPUs

remap_schema
move objects from one schema to another

remap_datafile
change the name of the datafile when moving across different systems

remap_tablespace
move from one tablespace to another
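Several of these options are easiest to combine in a parameter file. A hypothetical example (the table name EMP and the filter values are illustrative):

# dpuser.par
SCHEMAS=dpuser
DIRECTORY=dpump_dir1
DUMPFILE=dpuser_%U.dmp
LOGFILE=dpuser.log
PARALLEL=2
CONTENT=ALL
EXCLUDE=STATISTICS
QUERY=EMP:"WHERE deptno = 10"

$ expdp dpuser/dpuser parfile=dpuser.par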

Useful Views

DBA_DATAPUMP_JOBS
summary information of all currently running Data Pump jobs

DBA_DATAPUMP_SESSIONS
displays the users currently running Data Pump jobs

V$SESSION_LONGOPS
displays information such as totalwork, sofar (the units of work done so far), units and opname
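To watch a running job's progress through V$SESSION_LONGOPS (a sketch):

select sid, opname, sofar, totalwork, units, time_remaining
  from v$session_longops
 where totalwork > sofar;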
Privileges

IMP_FULL_DATABASE
required if using advanced features

EXP_FULL_DATABASE
required if using advanced features

DBMS_DATAPUMP package
The dbms_datapump package can be used for the following:
starting/stopping/restarting a job
monitoring a job
detaching from a job

exporting

declare
  d1 NUMBER;
begin
  d1 := dbms_datapump.open('EXPORT','SCHEMA',null,'test1','LATEST');
  dbms_datapump.add_file(d1, 'test1.dmp', 'dmpdir');
  dbms_datapump.metadata_filter(d1, 'SCHEMA_EXPR', 'IN (''OE'')');
  dbms_datapump.start_job(d1);
  dbms_datapump.detach(d1);
end;
/

importing

declare
  d1 NUMBER;
begin
  d1 := dbms_datapump.open('IMPORT','FULL',null,'test1');
  dbms_datapump.add_file(d1, 'test1.dmp', 'dmpdir');
  dbms_datapump.metadata_remap(d1, 'REMAP_SCHEMA', 'oe', 'hr');
  dbms_datapump.start_job(d1);
  dbms_datapump.detach(d1);
end;
/

============================
REMAP

REMAP_TABLESPACE – This allows you to easily import a table into a different tablespace from the one it was originally exported from. The databases have to be 10.1 or later.

> impdp username/password REMAP_TABLESPACE=tbs_1:tbs_6 DIRECTORY=dpumpdir1 DUMPFILE=employees.dmp

REMAP_DATAFILE – This is a very useful feature when you move databases between platforms that have different file naming conventions. This parameter changes the source datafile name to the target datafile name in all SQL
statements where the source datafile is referenced. Because the REMAP_DATAFILE value uses quotation marks, it's best to specify the parameter within a parameter file.

The parameter file, payroll.par, has the following content:

DIRECTORY=dpump_dir1
FULL=Y
DUMPFILE=db_full.dmp
REMAP_DATAFILE="'C:\DB1\HRDATA\PAYROLL\tbs6.dbf':'/db1/hrdata/payroll/tbs6.dbf'"

You can then issue the following command:

> impdp username/password PARFILE=payroll.par
================

Maximizing the Power of Oracle Data Pump

Data Pump works great with default parameters, but once you are comfortable with Data Pump, there are new capabilities that you will want to explore.

Parallelism

Data Pump Export and Import operations are processed in the database as a Data Pump job, which is much more efficient than the client-side execution of original Export and Import. Data Pump operations can take advantage of the server's parallel processes to read or write multiple data streams simultaneously (PARALLEL is only available in the Enterprise Edition of Oracle Database). The number of parallel processes can be changed on the fly using Data Pump's interactive command-line mode. You may have a certain number of processes running during the day and decide to change that number if more system resources become available at night (or vice versa).

For best performance, you should do the following:

1. Make sure your system is well balanced across CPU, memory, and I/O.
2. Have at least one dump file for each degree of parallelism. If there aren’t enough dump files, performance will not be optimal because multiple threads of execution will be trying to access the same dump file.
3. Put files that are members of a dump file set on separate disks so that they will be written and read in parallel.
4. For export operations, use the %U variable in the DUMPFILE parameter so multiple dump files can be automatically generated.

> expdp username/password DIRECTORY=dpump_dir1 JOB_NAME=hr DUMPFILE=par_exp%u.dmp PARALLEL=4

-------
