Schema Refresh using ASM Diskgroups
How to create Datapump Export Dumps within ASM diskgroups
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
EXPORT (EXPDP)
$ps -ef| grep pmon
$. oraenv
ORACLE_SID = [dbinst1] ? +ASM1
The Oracle base for ORACLE_HOME=/opt/app/11.2.0/grid is /opt/app/oracle
$asmcmd
ASMCMD> cd DATA01/TESTDB
ASMCMD> mkdir EXPDP
1. Create a directory in ASM.
SQL> create or replace directory DATAPUMP_ASM_DIR as '+DATA01/TESTDB/EXPDP';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_ASM_DIR TO SYSTEM;
2. Create a logfile directory in filesystem, since logfiles can't be stored in ASM.
SQL> create or replace directory DATAPUMP_LOG_DIR as '/oracle/backup/testdb/expdp';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_LOG_DIR TO SYSTEM;
3. Create logfile directory in filesystem.
$ cd /oracle/backup/testdb/
$ mkdir expdp
$ chmod -R 755 expdp
$ cd expdp
$pwd
/oracle/backup/testdb/expdp
$ df -h /oracle/backup/testdb/expdp -- or
$df -gt /oracle/backup/testdb/expdp
$ sqlplus "/as sysdba"
set pages 50000 lines 32767
col DIRECTORY_PATH for a80;
select * from dba_directories;
OWNER DIRECTORY_NAME DIRECTORY_PATH
------- ---------------- ---------------------------------------
SYS DATAPUMP_ASM_DIR +DATA01/testdb/expdp
SYS DATAPUMP_LOG_DIR /oracle/backup/testdb/expdp
Where DATAPUMP_ASM_DIR is an ASM-based directory
and DATAPUMP_LOG_DIR is on a filesystem.
Here, the export dump is written to the ASM-based directory,
but the log is written to another directory which is on a filesystem.
We can create export dumps to the ASM diskgroups. But we can’t write expdp log to ASM (as it is a text file).
$vi schema_export_in_ASM.par
userid="/ as sysdba"
directory=DATAPUMP_ASM_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=DATAPUMP_LOG_DIR:schema_export_in_ASM.log
schemas=schema1,schema2
exclude=statistics
parallel=6
compression=all
cluster=n ---- 11g
:wq
$nohup expdp parfile=schema_export_in_ASM.par &
$tail -f nohup.out or
$tail -f schema_export_in_ASM.log
==================================================================================================================
scp dumpfiles from Source Server ASM to Destination Server ASM
==================================================================================================================
Take schema backup and Drop Schema Objects
==================================================================================================================
IMPORT (IMPDP)
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
$ps -ef| grep pmon
$. oraenv
ORACLE_SID = [dbinst1] ? +ASM1
The Oracle base for ORACLE_HOME=/opt/app/11.2.0/grid is /opt/app/oracle
$asmcmd
ASMCMD> cd DATA01/TESTDB
ASMCMD> mkdir IMPDP
1. Create a directory in ASM.
SQL> create or replace directory DATAPUMP_ASM_DIR as '+DATA01/TESTDB/IMPDP';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_ASM_DIR TO SYSTEM;
2. Create a logfile directory in filesystem, since logfiles can't be stored in ASM.
SQL> create or replace directory DATAPUMP_LOG_DIR as '/oracle/backup/testdb/impdp';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_LOG_DIR TO SYSTEM;
3. Create logfile directory in filesystem.
$ cd /oracle/backup/testdb/
$ mkdir impdp
$ chmod -R 755 impdp
$ cd impdp
$pwd
/oracle/backup/testdb/impdp
$ sqlplus "/as sysdba"
set pages 50000 lines 32767
col DIRECTORY_PATH for a80;
select * from dba_directories;
OWNER DIRECTORY_NAME DIRECTORY_PATH
------- ---------------- ---------------------------------------
SYS DATAPUMP_ASM_DIR +DATA01/testdb/impdp
SYS DATAPUMP_LOG_DIR /oracle/backup/testdb/impdp
Where DATAPUMP_ASM_DIR is an ASM-based directory
and DATAPUMP_LOG_DIR is on a filesystem.
Here, the export dump is written to the ASM-based directory,
but the log is written to another directory which is on a filesystem.
We can create export dumps to the ASM diskgroups. But we can’t write expdp log to ASM (as it is a text file).
$vi import_schema_export_in_ASM.par
userid="/ as sysdba"
directory=DATAPUMP_ASM_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=DATAPUMP_LOG_DIR:import_schema_export_in_ASM.log
schemas=schema1,schema2
parallel=6
cluster=n ---- 11g
If required,
REMAP_SCHEMA=source_schema1:destination_schema1,source_schema2:destination_schema2
REMAP_TABLESPACE=source_tablespace1:destination_tablespace1,source_tablespace2:destination_tablespace2
:wq
$nohup impdp parfile=import_schema_export_in_ASM.par &
$tail -f nohup.out
$tail -f import_schema_export_in_ASM.log
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
Script to take user password backup in Oracle 11g
Script to take user password backup in Oracle 11g
$ sqlplus "/as sysdba"
set pages 50000 lines 32767
select 'alter user ' || name || ' identified by values ''' || password || ''';' from sys.user$
where name in ('&name');
USER DDL
SET LONG 99999999
select dbms_metadata.get_ddl('USER','&username') from dual;
Labels: SCHEMA REFRESH, SCRIPT, USER
June 01, 2013
How to Drop SCHEMA Objects in Oracle
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
DROP SCHEMA OBJECTS
-------------------
set head off
set pagesize 0
set linesize 300
spool target_schemaname_drop_obj.sql
select 'drop '||object_type||' '||owner||'.'||object_name||decode(object_type,'TABLE',' CASCADE CONSTRAINTS;',';') from dba_objects where owner in
('&owner') and object_type not in ('DATABASE LINK','JOB','LOB') order by object_type,object_name
/
spool off;
set head on
@ target_schemaname_drop_obj.sql
exit
DROP OTHER OBJECTS (If required)
------------------
set head off
set pagesize 0
set linesize 300
spool target_schemaname_drop_other_obj.sql
select 'DROP '||decode(object_type, 'PACKAGE BODY', 'PACKAGE',object_type)|| ' '||owner||'.'||object_name||decode(object_type,'TABLE',' CASCADE
CONSTRAINTS;',';')
from dba_objects where owner=upper('&owner') and object_type not in ('TABLE')
/
spool off;
set head on
@target_schemaname_drop_other_obj.sql
exit
ls -lrth target_schemaname_drop*.sql
vi target_schemaname_drop_obj.sql / vi target_schemaname_drop_other_obj.sql
:wq
sqlplus "/as sysdba"
@target_schemaname_drop_obj.sql
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
exit
SCHEMA REFRESH in ORACLE
Steps for SCHEMA REFRESH:-
------------------------
1. Capture the SOURCE database SCHEMA OBJECTS COUNT which are to be refreshed.
2. Take EXPORT of required SCHEMA(s) at SOURCE Database.
3. Copy the EXPORT DUMPFILE(s) to DESTINATION Database Server.(SCP)
4. Take EXPORT of required SCHEMA(s) at DESTINATION Database.(Recommended)
5. DROP the SCHEMA(s) objects at DESTINATION Database except DATABASE LINK (DB LINK).
6. IMPORT the Copied SOURCE EXPORT DUMPFILE(s) to the DESTINATION Database.
7. THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED.
8. Gather Schema statistics at DESTINATION Database after schema refresh.(Recommended)
SOURCE
------
Hostname :
Database Name :
Schema(s) Name :
DESTINATION
------
Hostname :
Database Name :
Schema(s) Name :
@ SOURCE
--------
Database Details:
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
FILE_NAME
---------
/FILE/LOCATION/
Check for the space availability for taking backup
--------------------------------------------------
df -h or df -gt
df -h /FILE/LOCATION/
cd /source/backup/location/
mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO
pwd
/source/backup/location/EXPORT_REF_NO
EXPDP
-----
col DIRECTORY_PATH for a80;
select * from dba_directories;
create or replace directory <EXPDPDIR> as '/source/backup/location/EXPORT_REF_NO';
grant read,write ON DIRECTORY <EXPDPDIR> to <SYSTEM>;
alter user <username> quota unlimited on <tablespace_name>; ---> If required
col DIRECTORY_PATH for a80;
select * from dba_directories;
PAR file
--------
vi SOURCE_SCHEMA_EXPDP.par
userid="/ as sysdba"
directory=DATAPUMP_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=source_schemaname_expdp.log
schemas=schemaname1,schemaname2
exclude=statistics
if required,
PARALLEL=6
JOB_NAME=SOURCE_SCHEMA_EXPDP
COMPRESSION=ALL
STATUS=60
FLASHBACK_TIME="TO_TIMESTAMP('24-APR-2013 16:00:00','DD-MON-YYYY HH24:MI:SS')"
--OR--
FLASHBACK_TIME="TO_TIMESTAMP('24-04-2013 16:00:00','DD-MM-YYYY HH24:MI:SS')"
:wq
nohup expdp parfile=SOURCE_SCHEMA_EXPDP.par &
Check the status of EXPORT
--------------------------
## COMPRESSION - Reduce size of dumpfile contents, where valid keyword values are: ALL, DATA_ONLY, METADATA_ONLY and NONE.
## You can export backup of database which was like certain months ago. This you can do using the FLASHBACK_TIME or FLASHBACK_SCN option.
## FLASHBACK_TIME - Time used to get the SCN closest to the specified time.The export operation is performed with data that is consistent as of this
SCN.
EXP
---
If the size of the schema is less better to go for EXP/IMP
nohup exp <user>/`pwd` file=source_schemaname_exp.dmp log=source_schemaname_exp.log owner=schemaname statistics=NONE resumable=y
resumable_name=continue resumable_timeout=18000
-----------------------------------------------------------------------------------------------------
SCP SOURCE DUMP FILES TO DESTINATION
----------------------------------------------------------
@ Source
--------
scp source_schemaname_expdp*.dmp oracle@target_hostname:/target/backup/location/EXPORT_REF_NO/
(OR)
@ Target
--------
cd /target/backup/location/EXPORT_REF_NO/
scp oracle@source_hostname:/source/backup/location/EXPORT_REF_NO/source_schemaname_expdp*.dmp .
pwd
/target/backup/location/EXPORT_REF_NO/
ls -lrth source_schemaname_expdp*.dmp --------> scp files are here
-----------------------------------------------------------------------------------------------------
@ Target
--------
Database Details:
----------------
Table Space Report
------------------
ASM Space Report
----------------
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
Check for the space availability for taking backup
-------------------------------------------------
df -h or df -gt
df -h /target/backup/location/
cd /target/backup/location/
mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO
pwd
/target/backup/location/EXPORT_REF_NO
EXPDP
-----
col DIRECTORY_PATH for a80;
select * from dba_directories;
PAR file
--------
vi TARGET_SCHEMA_EXPDP.par & -----> same as above PAR file at SOURCE DATABASE
:wq
nohup expdp parfile=TARGET_SCHEMA_EXPDP.par &
Check the status of EXPORT
--------------------------
EXP
---
If the size of the schema is less better to go for EXP/IMP
nohup exp userid=\'/ as sysdba \' file=target_schemaname_exp.dmp log=target_schemaname_exp.log owner=schemaname statistics=NONE resumable=y
resumable_name=continue resumable_timeout=18000 &
------------------------------------------------------------------------------------------------
Drop Schema(s) Objects
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
exit
NOTE:-
If required,To remove all dropped objects from the recyclebin (current user)
$sqlplus "/as sysdba"
SQL> PURGE DBA_RECYCLEBIN;
DBA Recyclebin purged.
-----------------------------------------------------------------------------------------------------
pwd
/target/backup/location/EXPORT_REF_NO/
ls -lrth source_schemaname_expdp*.dmp
Restore schema backup
---------------------
IMPDP
-----
vi import_source_schemaname_expdp.par
userid="/ as sysdba"
directory=EXPDPDIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=import_source_schemaname_expdp.log
schemas=schemaname1,schemaname2
PARALLEL=6
JOB_NAME=IMPORT_SCHEMA_EXPDP
If required,
remap_schema=<source_schema1>:<target_schema1>,<source_schema2>:<target_schema2>
remap_tablespace=<source_tablespace>:<target_tablespace>
exclude=constraint,ref_constraint,index
nohup impdp parfile=import_source_schemaname_expdp.par &
Validation of Import of Schemas
-------------------------------
set pages 50000 lines 32767
col owner format a15
col object_type format a20
col object_name format a30
select owner, object_name, object_type, status, created, last_ddl_time from dba_objects where owner in ('&owner')
order by owner, object_type, object_name
/
IMP
---
nohup imp userid=\'/ as sysdba \' file=source_schemaname_exp.dmp log=source_schemaname_exp_import.log fromuser=schemaname touser=schemaname ignore=y
statistics=NONE commit=y resumable=y resumable_name=continue resumable_timeout=18000 &
-----------------------------------------------------------------------------------------------------
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
NOTE:
THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED
-----------------------------------------------------------------------------------------------------
Gather schema stats after schema refresh
----------------------------------------
set timing on
exec dbms_stats.gather_schema_stats(ownname=>'&schema_name', CASCADE=>TRUE,ESTIMATE_PERCENT=>30,degree =>4);
-----------------------------------------------------------------------------------------------------
NOTE: ONCE THE TASK IS COMPLETED AT THE PRODUCTION SERVER, IMMEDIATELY DO EXIT.
How to create Datapump Export Dumps within ASM diskgroups
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
EXPORT (EXPDP)
$ps -ef| grep pmon
$. oraenv
ORACLE_SID = [dbinst1] ? +ASM1
The Oracle base for ORACLE_HOME=/opt/app/11.2.0/grid is /opt/app/oracle
$asmcmd
ASMCMD> cd DATA01/TESTDB
ASMCMD> mkdir EXPDP
1. Create a directory in ASM.
SQL> create or replace directory DATAPUMP_ASM_DIR as '+DATA01/TESTDB/EXPDP';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_ASM_DIR TO SYSTEM;
2. Create a logfile directory in filesystem, since logfiles can't be stored in ASM.
SQL> create or replace directory DATAPUMP_LOG_DIR as '/oracle/backup/testdb/expdp';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_LOG_DIR TO SYSTEM;
3. Create logfile directory in filesystem.
$ cd /oracle/backup/testdb/
$ mkdir expdp
$ chmod -R 755 expdp
$ cd expdp
$pwd
/oracle/backup/testdb/expdp
$ df -h /oracle/backup/testdb/expdp -- or
$df -gt /oracle/backup/testdb/expdp
$ sqlplus "/as sysdba"
set pages 50000 lines 32767
col DIRECTORY_PATH for a80;
select * from dba_directories;
OWNER DIRECTORY_NAME DIRECTORY_PATH
------- ---------------- ---------------------------------------
SYS DATAPUMP_ASM_DIR +DATA01/testdb/expdp
SYS DATAPUMP_LOG_DIR /oracle/backup/testdb/expdp
Where DATAPUMP_ASM_DIR is an ASM-based directory
and DATAPUMP_LOG_DIR is on a filesystem.
Here, the export dump is written to the ASM-based directory,
but the log is written to another directory which is on a filesystem.
We can create export dumps to the ASM diskgroups. But we can’t write expdp log to ASM (as it is a text file).
$vi schema_export_in_ASM.par
userid="/ as sysdba"
directory=DATAPUMP_ASM_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=DATAPUMP_LOG_DIR:schema_export_in_ASM.log
schemas=schema1,schema2
exclude=statistics
parallel=6
compression=all
cluster=n ---- 11g
:wq
$nohup expdp parfile=schema_export_in_ASM.par &
$tail -f nohup.out or
$tail -f schema_export_in_ASM.log
==================================================================================================================
scp dumpfiles from Source Server ASM to Destination Server ASM
==================================================================================================================
Take schema backup and Drop Schema Objects
==================================================================================================================
IMPORT (IMPDP)
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
$ps -ef| grep pmon
$. oraenv
ORACLE_SID = [dbinst1] ? +ASM1
The Oracle base for ORACLE_HOME=/opt/app/11.2.0/grid is /opt/app/oracle
$asmcmd
ASMCMD> cd DATA01/TESTDB
ASMCMD> mkdir IMPDP
1. Create a directory in ASM.
SQL> create or replace directory DATAPUMP_ASM_DIR as '+DATA01/TESTDB/IMPDP';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_ASM_DIR TO SYSTEM;
2. Create a logfile directory in filesystem, since logfiles can't be stored in ASM.
SQL> create or replace directory DATAPUMP_LOG_DIR as '/oracle/backup/testdb/impdp';
Directory created.
SQL> GRANT READ,WRITE ON DIRECTORY DATAPUMP_LOG_DIR TO SYSTEM;
3. Create logfile directory in filesystem.
$ cd /oracle/backup/testdb/
$ mkdir impdp
$ chmod -R 755 impdp
$ cd impdp
$pwd
/oracle/backup/testdb/impdp
$ sqlplus "/as sysdba"
set pages 50000 lines 32767
col DIRECTORY_PATH for a80;
select * from dba_directories;
OWNER DIRECTORY_NAME DIRECTORY_PATH
------- ---------------- ---------------------------------------
SYS DATAPUMP_ASM_DIR +DATA01/testdb/impdp
SYS DATAPUMP_LOG_DIR /oracle/backup/testdb/impdp
Where DATAPUMP_ASM_DIR is an ASM-based directory
and DATAPUMP_LOG_DIR is on a filesystem.
Here, the export dump is written to the ASM-based directory,
but the log is written to another directory which is on a filesystem.
We can create export dumps to the ASM diskgroups. But we can’t write expdp log to ASM (as it is a text file).
$vi import_schema_export_in_ASM.par
userid="/ as sysdba"
directory=DATAPUMP_ASM_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=DATAPUMP_LOG_DIR:import_schema_export_in_ASM.log
schemas=schema1,schema2
parallel=6
cluster=n ---- 11g
If required,
REMAP_SCHEMA=source_schema1:destination_schema1,source_schema2:destination_schema2
REMAP_TABLESPACE=source_tablespace1:destination_tablespace1,source_tablespace2:destination_tablespace2
:wq
$nohup impdp parfile=import_schema_export_in_ASM.par &
$tail -f nohup.out
$tail -f import_schema_export_in_ASM.log
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
Script to take user password backup in Oracle 11g
Script to take user password backup in Oracle 11g
$ sqlplus "/as sysdba"
set pages 50000 lines 32767
select 'alter user ' || name || ' identified by values ''' || password || ''';' from sys.user$
where name in ('&name');
USER DDL
SET LONG 99999999
select dbms_metadata.get_ddl('USER','&username') from dual;
Labels: SCHEMA REFRESH, SCRIPT, USER
June 01, 2013
How to Drop SCHEMA Objects in Oracle
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
DROP SCHEMA OBJECTS
-------------------
set head off
set pagesize 0
set linesize 300
spool target_schemaname_drop_obj.sql
select 'drop '||object_type||' '||owner||'.'||object_name||decode(object_type,'TABLE',' CASCADE CONSTRAINTS;',';') from dba_objects where owner in
('&owner') and object_type not in ('DATABASE LINK','JOB','LOB') order by object_type,object_name
/
spool off;
set head on
@ target_schemaname_drop_obj.sql
exit
DROP OTHER OBJECTS (If required)
------------------
set head off
set pagesize 0
set linesize 300
spool target_schemaname_drop_other_obj.sql
select 'DROP '||decode(object_type, 'PACKAGE BODY', 'PACKAGE',object_type)|| ' '||owner||'.'||object_name||decode(object_type,'TABLE',' CASCADE
CONSTRAINTS;',';')
from dba_objects where owner=upper('&owner') and object_type not in ('TABLE')
/
spool off;
set head on
@target_schemaname_drop_other_obj.sql
exit
ls -lrth target_schemaname_drop*.sql
vi target_schemaname_drop_obj.sql / vi target_schemaname_drop_other_obj.sql
:wq
sqlplus "/as sysdba"
@target_schemaname_drop_obj.sql
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
exit
SCHEMA REFRESH in ORACLE
Steps for SCHEMA REFRESH:-
------------------------
1. Capture the SOURCE database SCHEMA OBJECTS COUNT which are to be refreshed.
2. Take EXPORT of required SCHEMA(s) at SOURCE Database.
3. Copy the EXPORT DUMPFILE(s) to DESTINATION Database Server.(SCP)
4. Take EXPORT of required SCHEMA(s) at DESTINATION Database.(Recommended)
5. DROP the SCHEMA(s) objects at DESTINATION Database except DATABASE LINK (DB LINK).
6. IMPORT the Copied SOURCE EXPORT DUMPFILE(s) to the DESTINATION Database.
7. THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED.
8. Gather Schema statistics at DESTINATION Database after schema refresh.(Recommended)
SOURCE
------
Hostname :
Database Name :
Schema(s) Name :
DESTINATION
------
Hostname :
Database Name :
Schema(s) Name :
@ SOURCE
--------
Database Details:
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
FILE_NAME
---------
/FILE/LOCATION/
Check for the space availability for taking backup
--------------------------------------------------
df -h or df -gt
df -h /FILE/LOCATION/
cd /source/backup/location/
mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO
pwd
/source/backup/location/EXPORT_REF_NO
EXPDP
-----
col DIRECTORY_PATH for a80;
select * from dba_directories;
create or replace directory <EXPDPDIR> as '/source/backup/location/EXPORT_REF_NO';
grant read,write ON DIRECTORY <EXPDPDIR> to <SYSTEM>;
alter user <username> quota unlimited on <tablespace_name>; ---> If required
col DIRECTORY_PATH for a80;
select * from dba_directories;
PAR file
--------
vi SOURCE_SCHEMA_EXPDP.par
userid="/ as sysdba"
directory=DATAPUMP_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=source_schemaname_expdp.log
schemas=schemaname1,schemaname2
exclude=statistics
if required,
PARALLEL=6
JOB_NAME=SOURCE_SCHEMA_EXPDP
COMPRESSION=ALL
STATUS=60
FLASHBACK_TIME="TO_TIMESTAMP('24-APR-2013 16:00:00','DD-MON-YYYY HH24:MI:SS')"
--OR--
FLASHBACK_TIME="TO_TIMESTAMP('24-04-2013 16:00:00','DD-MM-YYYY HH24:MI:SS')"
:wq
nohup expdp parfile=SOURCE_SCHEMA_EXPDP.par &
Check the status of EXPORT
--------------------------
## COMPRESSION - Reduce size of dumpfile contents, where valid keyword values are: ALL, DATA_ONLY, METADATA_ONLY and NONE.
## You can export backup of database which was like certain months ago. This you can do using the FLASHBACK_TIME or FLASHBACK_SCN option.
## FLASHBACK_TIME - Time used to get the SCN closest to the specified time.The export operation is performed with data that is consistent as of this
SCN.
EXP
---
If the size of the schema is less better to go for EXP/IMP
nohup exp <user>/`pwd` file=source_schemaname_exp.dmp log=source_schemaname_exp.log owner=schemaname statistics=NONE resumable=y
resumable_name=continue resumable_timeout=18000
-----------------------------------------------------------------------------------------------------
SCP SOURCE DUMP FILES TO DESTINATION
----------------------------------------------------------
@ Source
--------
scp source_schemaname_expdp*.dmp oracle@target_hostname:/target/backup/location/EXPORT_REF_NO/
(OR)
@ Target
--------
cd /target/backup/location/EXPORT_REF_NO/
scp oracle@source_hostname:/source/backup/location/EXPORT_REF_NO/source_schemaname_expdp*.dmp .
pwd
/target/backup/location/EXPORT_REF_NO/
ls -lrth source_schemaname_expdp*.dmp --------> scp files are here
-----------------------------------------------------------------------------------------------------
@ Target
--------
Database Details:
----------------
Table Space Report
------------------
ASM Space Report
----------------
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
Check for the space availability for taking backup
-------------------------------------------------
df -h or df -gt
df -h /target/backup/location/
cd /target/backup/location/
mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO
pwd
/target/backup/location/EXPORT_REF_NO
EXPDP
-----
col DIRECTORY_PATH for a80;
select * from dba_directories;
PAR file
--------
vi TARGET_SCHEMA_EXPDP.par & -----> same as above PAR file at SOURCE DATABASE
:wq
nohup expdp parfile=TARGET_SCHEMA_EXPDP.par &
Check the status of EXPORT
--------------------------
EXP
---
If the size of the schema is less better to go for EXP/IMP
nohup exp userid=\'/ as sysdba \' file=target_schemaname_exp.dmp log=target_schemaname_exp.log owner=schemaname statistics=NONE resumable=y
resumable_name=continue resumable_timeout=18000 &
------------------------------------------------------------------------------------------------
Drop Schema(s) Objects
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
exit
NOTE:-
If required,To remove all dropped objects from the recyclebin (current user)
$sqlplus "/as sysdba"
SQL> PURGE DBA_RECYCLEBIN;
DBA Recyclebin purged.
-----------------------------------------------------------------------------------------------------
pwd
/target/backup/location/EXPORT_REF_NO/
ls -lrth source_schemaname_expdp*.dmp
Restore schema backup
---------------------
IMPDP
-----
vi import_source_schemaname_expdp.par
userid="/ as sysdba"
directory=EXPDPDIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=import_source_schemaname_expdp.log
schemas=schemaname1,schemaname2
PARALLEL=6
JOB_NAME=IMPORT_SCHEMA_EXPDP
If required,
remap_schema=<source_schema1>:<target_schema1>,<source_schema2>:<target_schema2>
remap_tablespace=<source_tablespace>:<target_tablespace>
exclude=constraint,ref_constraint,index
nohup impdp parfile=import_source_schemaname_expdp.par &
Validation of Import of Schemas
-------------------------------
set pages 50000 lines 32767
col owner format a15
col object_type format a20
col object_name format a30
select owner, object_name, object_type, status, created, last_ddl_time from dba_objects where owner in ('&owner')
order by owner, object_type, object_name
/
IMP
---
nohup imp userid=\'/ as sysdba \' file=source_schemaname_exp.dmp log=source_schemaname_exp_import.log fromuser=schemaname touser=schemaname ignore=y
statistics=NONE commit=y resumable=y resumable_name=continue resumable_timeout=18000 &
-----------------------------------------------------------------------------------------------------
Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');
select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;
select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by
object_type;
NOTE:
THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED
-----------------------------------------------------------------------------------------------------
Gather schema stats after schema refresh
----------------------------------------
set timing on
exec dbms_stats.gather_schema_stats(ownname=>'&schema_name', CASCADE=>TRUE,ESTIMATE_PERCENT=>30,degree =>4);
-----------------------------------------------------------------------------------------------------
NOTE: ONCE THE TASK IS COMPLETED AT THE PRODUCTION SERVER, IMMEDIATELY DO EXIT.
No comments:
Post a Comment