-- List every OEM job execution with a human-readable status.
-- FIX(review): the original comma-joined the two tables with no join
-- predicate at all, producing a Cartesian product (every execution paired
-- with every job).  Joined on job_id, the OEM repository job key -- confirm
-- against your repository version.
SELECT
    b.job_name,
    b.job_description,
    b.job_owner,
    a.scheduled_time,
    a.end_time,
    CASE a.status
        WHEN 1  THEN 'SCHEDULED'
        WHEN 2  THEN 'RUNNING'
        WHEN 3  THEN 'FAILED INIT'
        WHEN 4  THEN 'FAILED'
        WHEN 5  THEN 'SUCCEEDED'
        WHEN 6  THEN 'SUSPENDED'
        WHEN 7  THEN 'AGENT DOWN'
        WHEN 8  THEN 'STOPPED'
        WHEN 9  THEN 'SUSPENDED/LOCK'
        WHEN 10 THEN 'SUSPENDED/EVENT'
        WHEN 11 THEN 'SUSPENDED/BLACKOUT'
        WHEN 12 THEN 'STOP PENDING'
        WHEN 13 THEN 'SUSPEND PENDING'
        WHEN 14 THEN 'INACTIVE'
        WHEN 15 THEN 'QUEUED'
        WHEN 16 THEN 'FAILED/RETRIED'
        WHEN 17 THEN 'WAITING'
        WHEN 18 THEN 'SKIPPED'
        -- Unknown codes fall through as their numeric value, like the
        -- original DECODE default (which implicitly converted to VARCHAR2).
        ELSE TO_CHAR(a.status)
    END AS "STATUS"
FROM mgmt_job_exec_summary a
INNER JOIN mgmt_job b
    ON b.job_id = a.job_id
ORDER BY b.job_name, a.scheduled_time;
Karen's Database Notes
This is a place where I can record all of my helpful Database tips.
Friday, April 22, 2011
Thursday, February 17, 2011
Use PIVOT to Create a Cross-Tabulation
This query will generate a table that shows each employee and the number of days they have called in for either Sick, Bereavement or MISC. The leave_type_code is a single database column, but the PIVOT allows you to make several columns from its values.
-- Cross-tabulation of leave days per employee: one column per leave type.
WITH pivot_data AS (
    -- One row per leave-type shift in 2010 for real (non-fake) employees.
    SELECT
        ed.employee_id      AS name,
        psa.leave_type_code AS ltc,
        psa.shift_date      AS shift_date
    FROM employee_detail ed
    INNER JOIN pa_shift_allocation psa
        ON psa.employee_id = ed.employee_id
    WHERE ed.is_fake = 'N'
      AND psa.assignment_type_code = 'L'
      -- FIX(review): ANSI DATE literals instead of NLS-dependent strings
      -- ('01-JAN-10' breaks under a different NLS_DATE_FORMAT), and a
      -- half-open range instead of BETWEEN so Dec 31 rows carrying a
      -- time-of-day component are not silently dropped.
      AND psa.shift_date >= DATE '2010-01-01'
      AND psa.shift_date <  DATE '2011-01-01'
)
SELECT *
FROM pivot_data
PIVOT (COUNT(shift_date) FOR ltc IN ('S' AS sk, 'B' AS bv, 'M' AS misc))
ORDER BY sk DESC;
NAME SK BV MISC
-------------------- ---------------------- ---------------------- ----------------------
EM1018 121 0 0
EM1034 61 5 0
EM1022 60 0 156
EM1019 60 0 154
EM1065 35 1 0
EM1107 33 5 0
EM1029 24 0 0
EM1136 23 2 0
EM1134 21 0 0
EM1151 21 2 0
EM1025 19 0 40
-- Cross-tabulation of leave days per employee: one column per leave type.
WITH pivot_data AS (
    -- One row per leave-type shift in 2010 for real (non-fake) employees.
    SELECT
        ed.employee_id      AS name,
        psa.leave_type_code AS ltc,
        psa.shift_date      AS shift_date
    FROM employee_detail ed
    INNER JOIN pa_shift_allocation psa
        ON psa.employee_id = ed.employee_id
    WHERE ed.is_fake = 'N'
      AND psa.assignment_type_code = 'L'
      -- FIX(review): ANSI DATE literals instead of NLS-dependent strings
      -- ('01-JAN-10' breaks under a different NLS_DATE_FORMAT), and a
      -- half-open range instead of BETWEEN so Dec 31 rows carrying a
      -- time-of-day component are not silently dropped.
      AND psa.shift_date >= DATE '2010-01-01'
      AND psa.shift_date <  DATE '2011-01-01'
)
SELECT *
FROM pivot_data
PIVOT (COUNT(shift_date) FOR ltc IN ('S' AS sk, 'B' AS bv, 'M' AS misc))
ORDER BY sk DESC;
NAME SK BV MISC
-------------------- ---------------------- ---------------------- ----------------------
EM1018 121 0 0
EM1034 61 5 0
EM1022 60 0 156
EM1019 60 0 154
EM1065 35 1 0
EM1107 33 5 0
EM1029 24 0 0
EM1136 23 2 0
EM1134 21 0 0
EM1151 21 2 0
EM1025 19 0 40
Wednesday, December 22, 2010
Remove all alphabetic characters from a string
-- Strip all alphabetic characters from a string and return the remaining
-- digits as a number.  REGEXP_REPLACE (Oracle 10g+) replaces the original
-- TRANSLATE-to-'X' / REPLACE trick: it deletes upper- AND lower-case letters
-- directly, so no UPPER() is needed and the intent is explicit.
select to_number(regexp_replace(column_name, '[[:alpha:]]')) FOO from table_name order by FOO;
Example:
-- Example: letters scattered through '123xj456iu789' are removed, leaving 123456789.
-- TRANSLATE maps 'A'->'X' and deletes 'B'..'Z'; REPLACE then removes the 'X's.
select to_number(replace(translate(upper('123xj456iu789'),'ABCDEFGHIJKLMNOPQRSTUVWXYZ','X'),'X')) FOO from dual
FOO
------
123456789
1 row selected
Example:
-- Example: letters scattered through '123xj456iu789' are removed, leaving 123456789.
-- TRANSLATE maps 'A'->'X' and deletes 'B'..'Z'; REPLACE then removes the 'X's.
select to_number(replace(translate(upper('123xj456iu789'),'ABCDEFGHIJKLMNOPQRSTUVWXYZ','X'),'X')) FOO from dual
FOO
------
123456789
1 row selected
Friday, December 10, 2010
Snapshot Too Old Error During Export
ORA-31693: Table data object failed to load/unload and is being skipped due to error:
ORA-02354: error in exporting/importing data
ORA-01555: snapshot too old: rollback segment number 11 with name "_SYSSMU11_1238457026$" too small
This occurred after we began using the FLASHBACK_TIME parameter for exports.
Thank you to the person who posted the solution here: http://www.eggheadcafe.com/software/aspnet/36119822/snapshot-too-old-error.aspx
This error occurs when at least 2 conditions are met:
1.) A long running Query (q)
2.) At least 1 short transaction (t) that modifies AND commits data
which will be selected by (q)
When (q) starts, Oracle guarantees read consistency, meaning you can
be sure that the result set of (q) reflects the state the db was in at
the start of (q).
If transaction (t) - started AFTER you started (q) - modifies data
that will be selected by (q) later on (because (q) is a long running
query), (q) will read the old, unmodified data out of the undo
segments and everything is as it should be.
The problem now is, that after (t) commits, the undo segments are
still holding the consistent data block(s), but are released and may
be used (and overwritten) by other transactions.
When (q) discovers the situation above, it will terminate with
ORA-1555.
There are basically 2 solutions to this problem:
a.) Don't do transactions while running such an extended query (which
will not be possible in real environments)
b.) Change the undo_retention - parameter of your database.
UNDO_RETENTION (the default is 900 seconds) tells the system how
long to wait before reusing the undo - segments after they have been
released by the transaction.
So if your pump-export needs, e.g., 1 hour, you should set this parameter
to 3600 or higher.
Note however, that undo_retention will be set for ALL undo - segments
in your system, so - depending on your transaction structure - you
might need a bigger undo-tablespace.
ORA-02354: error in exporting/importing data
ORA-01555: snapshot too old: rollback segment number 11 with name "_SYSSMU11_1238457026$" too small
This occurred after we began using the FLASHBACK_TIME parameter for exports.
Thank you to the person who posted the solution here: http://www.eggheadcafe.com/software/aspnet/36119822/snapshot-too-old-error.aspx
This error occurs when at least 2 conditions are met:
1.) A long running Query (q)
2.) At least 1 short transaction (t) that modifies AND commits data
which will be selected by (q)
When (q) starts, Oracle guarantees read consistency, meaning you can
be sure that the result set of (q) reflects the state the db was in at
the start of (q).
If transaction (t) - started AFTER you started (q) - modifies data
that will be selected by (q) later on (because (q) is a long running
query), (q) will read the old, unmodified data out of the undo
segments and everything is as it should be.
The problem now is, that after (t) commits, the undo segments are
still holding the consistent data block(s), but are released and may
be used (and overwritten) by other transactions.
When (q) discovers the situation above, it will terminate with
ORA-1555.
There are basically 2 solutions to this problem:
a.) Don't do transactions while running such an extended query (which
will not be possible in real environments)
b.) Change the undo_retention - parameter of your database.
UNDO_RETENTION (the default is 900 seconds) tells the system how
long to wait before reusing the undo - segments after they have been
released by the transaction.
So if your pump-export needs, e.g., 1 hour, you should set this parameter
to 3600 or higher.
Note however, that undo_retention will be set for ALL undo - segments
in your system, so - depending on your transaction structure - you
might need a bigger undo-tablespace.
Datapump Export Example Using FLASHBACK_TIME
Use this as a parameter value when creating nightly export job in OEM. This uses SYSDATE for the FLASHBACK_TIME. It also appends the current date to the dump and log file names.
SET SERVEROUTPUT ON
variable exit_code NUMBER

-- Nightly full-database Data Pump export, consistent as of SYSDATE via
-- FLASHBACK_TIME.  The current date (YYYYMONDD) is appended to the dump and
-- log file names so successive runs do not collide.  On any error the block
-- prints the Oracle error and sets :exit_code = 2 for the caller.
DECLARE
  h1 NUMBER;         -- Data Pump master job handle
  dt VARCHAR2(12);   -- date stamp for file names, e.g. 2010DEC10
  -- FIX(review): removed unused variable js VARCHAR2(20).
BEGIN
  :exit_code := 0;
  -- Direct assignment; no need for SELECT ... INTO ... FROM dual.
  dt := TO_CHAR(SYSDATE, 'YYYYMONDD');
  h1 := dbms_datapump.open (operation => 'EXPORT', job_mode => 'FULL', job_name => 'PRODDB_EXPORT', version => 'COMPATIBLE');
  dbms_datapump.set_parallel(handle => h1, degree => 1);
  -- filetype 3 = log file, filetype 1 = dump file (DBMS_DATAPUMP file-type constants)
  dbms_datapump.add_file(handle => h1, filename => 'PRODDBEXP' ||dt||'.LOG', directory => 'PRODDB_EXP_DIR', filetype => 3);
  dbms_datapump.set_parameter(handle => h1, name => 'KEEP_MASTER', value => 0);
  dbms_datapump.add_file(handle => h1, filename => 'PRODDBEXP'||dt||'.DMP', directory => 'PRODDB_EXP_DIR', filetype => 1);
  -- Data Pump evaluates this expression string, pinning the whole export to
  -- one point in time.  NOTE(review): requires UNDO_RETENTION to cover the
  -- export duration or ORA-01555 results (see note above in this file).
  dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_TIME', value => 'TO_TIMESTAMP (to_char(sysdate,''dd.mm.yyyy HH24:MI:SS''),''dd.mm.yyyy HH24:MI:SS'')');
  dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 1);
  dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'AUTOMATIC');
  dbms_datapump.start_job(handle => h1, skip_current => 0, abort_step => 0);
  -- Detach immediately; the server-side job keeps running on its own.
  dbms_datapump.detach(handle => h1);
EXCEPTION
  WHEN OTHERS THEN
    dbms_output.put_line(TO_CHAR(SQLCODE)||' :'||SQLERRM);
    :exit_code := 2;
    ROLLBACK;
END;
/
exit :exit_code;
SET SERVEROUTPUT ON
variable exit_code NUMBER

-- Nightly full-database Data Pump export, consistent as of SYSDATE via
-- FLASHBACK_TIME.  The current date (YYYYMONDD) is appended to the dump and
-- log file names so successive runs do not collide.  On any error the block
-- prints the Oracle error and sets :exit_code = 2 for the caller.
DECLARE
  h1 NUMBER;         -- Data Pump master job handle
  dt VARCHAR2(12);   -- date stamp for file names, e.g. 2010DEC10
  -- FIX(review): removed unused variable js VARCHAR2(20).
BEGIN
  :exit_code := 0;
  -- Direct assignment; no need for SELECT ... INTO ... FROM dual.
  dt := TO_CHAR(SYSDATE, 'YYYYMONDD');
  h1 := dbms_datapump.open (operation => 'EXPORT', job_mode => 'FULL', job_name => 'PRODDB_EXPORT', version => 'COMPATIBLE');
  dbms_datapump.set_parallel(handle => h1, degree => 1);
  -- filetype 3 = log file, filetype 1 = dump file (DBMS_DATAPUMP file-type constants)
  dbms_datapump.add_file(handle => h1, filename => 'PRODDBEXP' ||dt||'.LOG', directory => 'PRODDB_EXP_DIR', filetype => 3);
  dbms_datapump.set_parameter(handle => h1, name => 'KEEP_MASTER', value => 0);
  dbms_datapump.add_file(handle => h1, filename => 'PRODDBEXP'||dt||'.DMP', directory => 'PRODDB_EXP_DIR', filetype => 1);
  -- Data Pump evaluates this expression string, pinning the whole export to
  -- one point in time.  NOTE(review): requires UNDO_RETENTION to cover the
  -- export duration or ORA-01555 results (see note above in this file).
  dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_TIME', value => 'TO_TIMESTAMP (to_char(sysdate,''dd.mm.yyyy HH24:MI:SS''),''dd.mm.yyyy HH24:MI:SS'')');
  dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 1);
  dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'AUTOMATIC');
  dbms_datapump.start_job(handle => h1, skip_current => 0, abort_step => 0);
  -- Detach immediately; the server-side job keeps running on its own.
  dbms_datapump.detach(handle => h1);
EXCEPTION
  WHEN OTHERS THEN
    dbms_output.put_line(TO_CHAR(SQLCODE)||' :'||SQLERRM);
    :exit_code := 2;
    ROLLBACK;
END;
/
exit :exit_code;
Find Records Having Column with Specific Value
SET SERVEROUTPUT ON

-- For every table in the current schema that has a column named COLUMN_NAME,
-- report how many rows hold the value &value in that column.
DECLARE
  -- Only table_name is used, so select it explicitly rather than *.
  CURSOR tab_cur IS
    SELECT table_name
    FROM   user_tab_columns
    WHERE  column_name = 'COLUMN_NAME';
  cnt       NUMBER;
  query_str VARCHAR2(1000);
BEGIN
  FOR rec IN tab_cur
  LOOP
    -- Double-quote the table name so mixed-case or reserved-word table
    -- names from the dictionary still parse.
    -- NOTE(review): &value is a SQL*Plus substitution variable expanded
    -- before execution -- do not feed it untrusted input.
    query_str := 'select count (*) from "' || rec.table_name || '" where COLUMN_NAME = ''&value''';
    EXECUTE IMMEDIATE query_str INTO cnt;
    IF cnt > 0 THEN
      DBMS_OUTPUT.PUT_LINE('There are ' || cnt || ' records in ' || rec.table_name);
    END IF;
  END LOOP;
END;
/
-- For every table in the current schema that has a column named COLUMN_NAME,
-- report how many rows hold the value &value in that column.
-- (Requires SET SERVEROUTPUT ON for the DBMS_OUTPUT lines to appear.)
DECLARE
  -- Only table_name is used, so select it explicitly rather than *.
  CURSOR tab_cur IS
    SELECT table_name
    FROM   user_tab_columns
    WHERE  column_name = 'COLUMN_NAME';
  cnt       NUMBER;
  query_str VARCHAR2(1000);
BEGIN
  FOR rec IN tab_cur
  LOOP
    -- Double-quote the table name so mixed-case or reserved-word table
    -- names from the dictionary still parse.
    -- NOTE(review): &value is a SQL*Plus substitution variable expanded
    -- before execution -- do not feed it untrusted input.
    query_str := 'select count (*) from "' || rec.table_name || '" where COLUMN_NAME = ''&value''';
    EXECUTE IMMEDIATE query_str INTO cnt;
    IF cnt > 0 THEN
      DBMS_OUTPUT.PUT_LINE('There are ' || cnt || ' records in ' || rec.table_name);
    END IF;
  END LOOP;
END;
/
Compile Invalid Objects
-- COMPILE INVALID TRIGGERS, FUNCTIONS, PROCEDURES, PACKAGES
EXEC DBMS_UTILITY.compile_schema(user);

-- COMPILE INVALID PACKAGES, PACKAGE BODIES, TRIGGERS, VIEWS
BEGIN
  FOR cur_rec IN (SELECT object_name, object_type
                  FROM   user_objects
                  WHERE  object_type IN ('PACKAGE', 'PACKAGE BODY', 'TRIGGER', 'VIEW')
                  AND    status != 'VALID')
  LOOP
    BEGIN
      -- A package body needs "ALTER PACKAGE x COMPILE BODY"; splicing the
      -- object_type 'PACKAGE BODY' into ALTER directly is invalid syntax.
      IF cur_rec.object_type = 'PACKAGE BODY' THEN
        EXECUTE IMMEDIATE 'ALTER PACKAGE "' || cur_rec.object_name || '" COMPILE BODY';
      ELSE
        EXECUTE IMMEDIATE 'ALTER ' || cur_rec.object_type ||
                          ' "' || cur_rec.object_name || '" COMPILE';
      END IF;
    EXCEPTION
      -- FIX(review): the inner block had no handler, so a single object that
      -- fails to compile aborted the whole loop.  Log and keep going.
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Failed to compile ' || cur_rec.object_type ||
                             ' ' || cur_rec.object_name || ': ' || SQLERRM);
    END;
  END LOOP;
END;
/
-- COMPILE INVALID TRIGGERS, FUNCTIONS, PROCEDURES, PACKAGES
EXEC DBMS_UTILITY.compile_schema(user);

-- COMPILE INVALID PACKAGES, PACKAGE BODIES, TRIGGERS, VIEWS
BEGIN
  FOR cur_rec IN (SELECT object_name, object_type
                  FROM   user_objects
                  WHERE  object_type IN ('PACKAGE', 'PACKAGE BODY', 'TRIGGER', 'VIEW')
                  AND    status != 'VALID')
  LOOP
    BEGIN
      -- A package body needs "ALTER PACKAGE x COMPILE BODY"; splicing the
      -- object_type 'PACKAGE BODY' into ALTER directly is invalid syntax.
      IF cur_rec.object_type = 'PACKAGE BODY' THEN
        EXECUTE IMMEDIATE 'ALTER PACKAGE "' || cur_rec.object_name || '" COMPILE BODY';
      ELSE
        EXECUTE IMMEDIATE 'ALTER ' || cur_rec.object_type ||
                          ' "' || cur_rec.object_name || '" COMPILE';
      END IF;
    EXCEPTION
      -- FIX(review): the inner block had no handler, so a single object that
      -- fails to compile aborted the whole loop.  Log and keep going.
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Failed to compile ' || cur_rec.object_type ||
                             ' ' || cur_rec.object_name || ': ' || SQLERRM);
    END;
  END LOOP;
END;
/
Subscribe to:
Posts (Atom)