Sunday, June 7, 2009

who's blocking


-- A quick little standalone script that tells you which process is blocking and which processes the blocking process is actually blocking.

-- When running this script in Query Analyzer, change your output to "Results in Text" (CTRL-T). It combines the blocking info from sp_who2 with DBCC INPUTBUFFER and a little cursor to wrap it all up. The formatting needs a little work, but the info is all there.

set nocount on
declare @spid varchar(10)
declare @blkby varchar(10)
declare @stmt varchar(100)
if object_id('tempdb..#temp') is null -- create #temp only if it does not already exist in this session
begin
   create table #temp ( spid integer, status varchar(100), login varchar(50), hostname varchar(25), blkby varchar(10), 
                        dbname varchar(25), command varchar(100), cputime integer, diskio integer, lastbatch varchar(25), 
                        programname varchar(255), spid2 integer )
end
else
begin
   truncate table #temp
end
insert into #temp
exec sp_who2

declare curs cursor for
select convert(varchar(10),spid), blkby from #temp where blkby not like '%.%' -- sp_who2 shows '  .' in BlkBy when a spid is not blocked, so this keeps only blocked spids

open curs

fetch next from curs into @spid, @blkby
while @@fetch_status = 0
begin
   set @stmt = 'dbcc inputbuffer(' + @blkby + ')'
   raiserror('SPID:%s is Blocking with the following statement',0,1,@blkby) with nowait
   exec (@stmt)
   raiserror('SPID that is Blocked:%s',0,1,@spid) with nowait
   set @stmt = 'dbcc inputbuffer(' + convert(varchar(10), @spid) + ')'
   exec (@stmt)
   fetch next from curs into @spid, @blkby
end

close curs 

deallocate curs

script out logins for 2000 to 2005 servers

USE master 
GO 
IF OBJECT_ID ('sp_hexadecimal') IS NOT NULL 
DROP PROCEDURE sp_hexadecimal 
GO 
CREATE PROCEDURE sp_hexadecimal 
@binvalue varbinary(256), 
@hexvalue varchar(256) OUTPUT 
AS 
DECLARE @charvalue varchar(256) 
DECLARE @i int 
DECLARE @length int 
DECLARE @hexstring char(16) 
SELECT @charvalue = '0x' 
SELECT @i = 1 
SELECT @length = DATALENGTH (@binvalue) 
SELECT @hexstring = '0123456789ABCDEF' 
WHILE (@i <= @length) 
BEGIN 
DECLARE @tempint int 
DECLARE @firstint int 
DECLARE @secondint int 
SELECT @tempint = CONVERT(int, SUBSTRING(@binvalue,@i,1)) 
SELECT @firstint = FLOOR(@tempint/16) 
SELECT @secondint = @tempint - (@firstint*16) 
SELECT @charvalue = @charvalue + 
SUBSTRING(@hexstring, @firstint+1, 1) + 
SUBSTRING(@hexstring, @secondint+1, 1) 
SELECT @i = @i + 1 
END 
SELECT @hexvalue = @charvalue 
GO 

IF OBJECT_ID ('sp_help_revlogin_2000_to_2005') IS NOT NULL 
DROP PROCEDURE sp_help_revlogin_2000_to_2005 
GO 
CREATE PROCEDURE sp_help_revlogin_2000_to_2005 

@login_name sysname = NULL, 
@include_db bit = 0, 
@include_role bit = 0 

AS 
DECLARE @name sysname 
DECLARE @xstatus int 
DECLARE @binpwd varbinary (256) 
DECLARE @dfltdb varchar (256) 
DECLARE @txtpwd sysname 
DECLARE @tmpstr varchar (256) 
DECLARE @SID_varbinary varbinary(85) 
DECLARE @SID_string varchar(256) 

IF (@login_name IS NULL) 
DECLARE login_curs CURSOR STATIC FOR 
SELECT sid, [name], xstatus, password, isnull(db_name(dbid), 'master') 
FROM master.dbo.sysxlogins 
WHERE srvid IS NULL AND 
[name] <> 'sa' 
ELSE 
DECLARE login_curs CURSOR FOR 
SELECT sid, [name], xstatus, password, isnull(db_name(dbid), 'master') 
FROM master.dbo.sysxlogins 
WHERE srvid IS NULL AND 
[name] = @login_name 

OPEN login_curs 

FETCH NEXT FROM login_curs INTO @SID_varbinary, @name, @xstatus, @binpwd, @dfltdb 

IF (@@fetch_status = -1) 
BEGIN 
PRINT 'No login(s) found.' 
CLOSE login_curs 
DEALLOCATE login_curs 
RETURN -1 
END 

SET @tmpstr = '/* sp_help_revlogin script ' 
PRINT @tmpstr 
SET @tmpstr = '** Generated ' 
+ CONVERT (varchar, GETDATE()) + ' on ' + @@SERVERNAME + ' */' 
PRINT @tmpstr 
PRINT '' 
PRINT '' 
PRINT '' 
PRINT '/***** CREATE LOGINS *****/' 

WHILE @@fetch_status = 0 
BEGIN 
PRINT '' 
SET @tmpstr = '-- Login: ' + @name 
PRINT @tmpstr 

IF (@xstatus & 4) = 4 
BEGIN -- NT authenticated account/group 
IF (@xstatus & 1) = 1 
BEGIN -- NT login is denied access 
SET @tmpstr = '' --'EXEC master..sp_denylogin ''' + @name + '''' 
PRINT @tmpstr 
END 
ELSE 
BEGIN -- NT login has access 
SET @tmpstr = 'IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE [name] = ''' + @name + ''')' 
PRINT @tmpstr 
SET @tmpstr = CHAR(9) + 'CREATE LOGIN [' + @name + '] FROM WINDOWS' 
PRINT @tmpstr 
END 
END 
ELSE 
BEGIN -- SQL Server authentication 
EXEC sp_hexadecimal @SID_varbinary, @SID_string OUT 

IF (@binpwd IS NOT NULL) 
BEGIN -- Non-null password 
EXEC sp_hexadecimal @binpwd, @txtpwd OUT 
SET @tmpstr = 'CREATE LOGIN [' + @name + '] WITH PASSWORD=' + @txtpwd + ' HASHED' 
END 
ELSE 
BEGIN -- Null password 
SET @tmpstr = 'CREATE LOGIN [' + @name + '] WITH PASSWORD=''''' 
END 

SET @tmpstr = @tmpstr + ', CHECK_POLICY=OFF, SID=' + @SID_string 
PRINT @tmpstr 
END 

FETCH NEXT FROM login_curs INTO @SID_varbinary, @name, @xstatus, @binpwd, @dfltdb 
END 

IF @include_db = 1 
BEGIN 
PRINT '' 
PRINT '' 
PRINT '' 
PRINT '/***** SET DEFAULT DATABASES *****/' 

FETCH FIRST FROM login_curs INTO @SID_varbinary, @name, @xstatus, @binpwd, @dfltdb 

WHILE @@fetch_status = 0 
BEGIN 
PRINT '' 
SET @tmpstr = '-- Login: ' + @name 
PRINT @tmpstr 

SET @tmpstr = 'ALTER LOGIN [' + @name + '] WITH DEFAULT_DATABASE=[' + @dfltdb + ']' 
PRINT @tmpstr 

FETCH NEXT FROM login_curs INTO @SID_varbinary, @name, @xstatus, @binpwd, @dfltdb 
END 
END 

IF @include_role = 1 
BEGIN 
PRINT '' 
PRINT '' 
PRINT '' 
PRINT '/***** SET SERVER ROLES *****/' 

FETCH FIRST FROM login_curs INTO @SID_varbinary, @name, @xstatus, @binpwd, @dfltdb 

WHILE @@fetch_status = 0 
BEGIN 
PRINT '' 
SET @tmpstr = '-- Login: ' + @name 
PRINT @tmpstr 

IF @xstatus &16 = 16 -- sysadmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''sysadmin''' 
PRINT @tmpstr 
END 

IF @xstatus &32 = 32 -- securityadmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''securityadmin''' 
PRINT @tmpstr 
END 

IF @xstatus &64 = 64 -- serveradmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''serveradmin''' 
PRINT @tmpstr 
END 

IF @xstatus &128 = 128 -- setupadmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''setupadmin''' 
PRINT @tmpstr 
END 

IF @xstatus &256 = 256 --processadmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''processadmin''' 
PRINT @tmpstr 
END 

IF @xstatus &512 = 512 -- diskadmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''diskadmin''' 
PRINT @tmpstr 
END 

IF @xstatus &1024 = 1024 -- dbcreator 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''dbcreator''' 
PRINT @tmpstr 
END 

IF @xstatus &4096 = 4096 -- bulkadmin 
BEGIN 
SET @tmpstr = 'exec master.dbo.sp_addsrvrolemember @loginame=''' + @name + ''', @rolename=''bulkadmin''' 
PRINT @tmpstr 
END 

FETCH NEXT FROM login_curs INTO @SID_varbinary, @name, @xstatus, @binpwd, @dfltdb 
END 
END 

CLOSE login_curs 
DEALLOCATE login_curs 
RETURN 0 
GO

exec sp_help_revlogin_2000_to_2005 @login_name=NULL, @include_db=1, @include_role=1
GO

Reading a SQL Profiler Trace file

In SQL Profiler you can open data from a file or a table with File -> Open menu option. If you have your data in multiple rolled over files SQL profiler will ask you if you'd like to open the next file after it reaches the end of the current file. You can also replay your trace using the Replay menu in which you can even set breakpoints.

Personally I like to analyze my trace files with SQL. To do that you have to have the data in a trace table. If you have a file you should import that file into the table.

The only way to do that is by using the fn_trace_gettable built-in table valued function:

select *
from fn_trace_gettable('c:\MyTraceFile.trc', default)

The first parameter is obviously the path to the trace file on the server's disk. The second parameter is the number of rolled over files to be read. The default value is -1 which means all files are read. If you want to read just one of many rolled over files use 1 as your parameter value.
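
For example, to read just the named file and skip any rolled over files (a minimal sketch; the path is only an illustration):

select *
from fn_trace_gettable('c:\MyTraceFile.trc', 1) -- 1 = read only this file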

Although you could use the function to read the trace file each time, the better way is to simply import the trace into a table and query the table itself since reading the file each time is slow. A simple way to do this is:

select IDENTITY(BIGINT, 1, 1) AS RowNumber, *
into MyTraceTable
from fn_trace_gettable('c:\MyTraceFile.trc', default)
where 1 = 0 -- just create the table without any data

After you have the table definition you can index it to your liking and then simply use insert to populate the table:

insert into MyTraceTable (EventClass, TextData, SPID, Duration, StartTime) -- list every trace column you captured; the RowNumber IDENTITY fills itself
select EventClass, TextData, SPID, Duration, StartTime
from fn_trace_gettable('c:\MyTraceFile.trc', default)
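
If you then want to index the table as mentioned above, a minimal sketch (assuming you captured the standard Duration column; the index names are just illustrations):

create clustered index CI_MyTraceTable_RowNumber on MyTraceTable (RowNumber)
create nonclustered index IX_MyTraceTable_Duration on MyTraceTable (Duration)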

Combining SQL Profiler Trace file and PerfMon log file

Start SQL Profiler and open a trace file in it. The SQL trace file and the PerfMon log file must be time synchronized, so either both have to be gathered on the same machine or the two machines' clocks have to be synchronized. Wait until the SQL trace file is fully loaded, then go to File -> Import Performance Data...

Choose the PerfMon log file (.BLG, .CSV) that correlates with the SQL trace file. A dialog will open asking you to select the counters you'd like to display. Click OK and you should get a screen looking similar to this:


Picture 4: Combine SQL Profiler Trace file and PerfMon counters log file

By clicking on the PerfMon graph you can see that the selected row in your trace changes. PerfMon's minimum polling interval is 1 second, so you will usually get more than one SQL event per PerfMon sample; the selected row in the trace is the one that best corresponds to the PerfMon counter at that time.

 

Shrink a named transaction log file belonging to a database

/*
Shrink a named transaction log file belonging to a database

Originally found at;

http://support.microsoft.com/support/kb/articles/q256/6/50.asp

Changes:
28.08.2001
Modified the inner loop so it tested the dx time so long overruns did not happen
Modified the inner loop so it had a fixed minimum quantity so there was no skip in skip out

29.08.2001
Modified the inner loop so it had a dynamic minimum quantity to allow faster shrinkage

24.01.2002
Modified the USE statement so it uses brackets around the dbname
Modified the @TruncLog variable so it uses brackets around the dbname

31.05.2002
Modified the code to use PRINT instead of SELECT in several cases
Modified the code to use @MaxCount instead of two unclear rules 
Modified the code to use @Factor instead of several hard-coded values 
Commented the use of @Factor
Moved the configuration and @Counter init code to before the start of the first loop to avoid repetition
Modified the code to display the process runtime in seconds rather than minutes

*/

SET NOCOUNT ON

DECLARE @LogicalFileName SYSNAME,
        @MaxMinutes INT,
        @NewSize INT,
        @Factor FLOAT

/*
  The process has several control parameters, most of the time you only need to worry about the first four
    as these are the big controls whereas the fifth is simply a fine tuning control which rarely needs to 
    come into play.
*/

--This is the name of the database for which the log will be shrunk.
USE [Speqs]

--Use sp_helpfile to identify the logical file name that you want to shrink.
SET @LogicalFileName = 'Speqs_Log';
--Limit on time allowed to wrap log in minutes
SET @MaxMinutes = 5;
--Ideal size of logfile in MB
SET @NewSize =100;

/*
  Factor determining maximum number of pages to pad out based on the original number of pages in use 
    (single page = 8K).  Values in the range 1.0 - 0.8 seem to work well for many databases.

  Increasing the number will increase the maximum number of pages allowed to be padded, which should
    force larger amounts of data to be dropped before the process finishes.  Often speeds up shrinking
    very large databases which are going through the process before the timer runs out.

  Decreasing the number will decrease the maximum number of pages allowed to be padded, which should
    force less work to be done.  Often aids with forcing smaller databases to shrink to minimum size
    when larger values were actually expanding them.

*/
SET @Factor = 1.0;                        

/*
  All code after this point is driven by these parameters and will not require editing unless you need to 
    fix a bug in the padding/shrinking process itself.
*/

-- Setup / initialize
DECLARE @OriginalSize INT,
        @StringData VARCHAR(500)

SELECT @OriginalSize = size -- in 8K pages
FROM sysfiles
WHERE name = @LogicalFileName;

SELECT @StringData = 'Original Size of ' + db_name() + ' LOG is ' + 
    CONVERT(VARCHAR(30),@OriginalSize) + ' 8K pages or ' + 
    CONVERT(VARCHAR(30),(@OriginalSize*8/1024)) + 'MB'
FROM sysfiles
WHERE name = @LogicalFileName;

PRINT @StringData;
PRINT ''

--Drop the temporary table if it already exists
IF ( OBJECT_ID('[dbo].[DummyTrans]') IS NOT NULL )
  DROP TABLE [DummyTrans]

CREATE TABLE [DummyTrans]( [DummyColumn] CHAR(8000) NOT NULL );

-- Wrap log and truncate it.
DECLARE @Counter   INT,
        @MaxCount  INT,
        @StartTime DATETIME,
        @TruncLog  VARCHAR(500)

-- Try an initial shrink.
DBCC SHRINKFILE (@LogicalFileName, @NewSize)

SET @TruncLog = 'BACKUP LOG [' + db_name() + '] WITH TRUNCATE_ONLY';
EXEC (@TruncLog)

-- Configure limiter
IF @OriginalSize / @Factor > 50000
    SET @MaxCount = 50000
ELSE
    SET @MaxCount = @OriginalSize * @Factor

-- Attempt to shrink down the log file
PRINT 'Minimum Quantity : '+CAST( @MaxCount AS VARCHAR(10) )
PRINT 'Maximum Time : '+CAST( @MaxMinutes AS VARCHAR(10) )+' minutes ('+CAST( @MaxMinutes*60 AS VARCHAR(10) )+' seconds)'
PRINT ''

SET @Counter = 0;
SET @StartTime = GETDATE();

--loop the padding code to reduce the log while
-- within time limit and 
-- log has not been shrunk enough
WHILE (
    (@MaxMinutes*60 > DATEDIFF(ss, @StartTime, GETDATE())) AND
    (@OriginalSize = (SELECT size FROM sysfiles WHERE name = @LogicalFileName)) AND
    ((@OriginalSize * 8 / 1024) > @NewSize)
)
BEGIN --Outer loop.

    --pad out the logfile a page at a time while
    -- number of pages padded does not exceed our maximum page padding limit
    -- within time limit and 
    -- log has not been shrunk enough
    WHILE (
        (@Counter < @MaxCount) AND 
        (@MaxMinutes*60 > DATEDIFF(ss, @StartTime, GETDATE())) AND
        (@OriginalSize = (SELECT size FROM sysfiles WHERE name = @LogicalFileName)) AND
        ((@OriginalSize * 8 / 1024) > @NewSize)
    )
    BEGIN --Inner loop
        
        INSERT INTO DummyTrans VALUES ('Fill Log')  -- Because it is a char field it inserts 8000 bytes.
        DELETE FROM DummyTrans
        SELECT @Counter = @Counter + 1

        --Every 1,000 cycles tell the user what is going on
        IF ROUND( @Counter , -3 ) = @Counter
        BEGIN
            PRINT 'Padded '+LTRIM( CAST( @Counter*8 AS VARCHAR(10) ) )+'K @ '+LTRIM( CAST( DATEDIFF( ss, @StartTime, GETDATE() ) AS VARCHAR(10) ) )+' seconds';
        END
    END

    --See if a trunc of the log shrinks it.
    EXEC( @TruncLog )

END
PRINT ''

SELECT @StringData = 'Final Size of ' + db_name() + ' LOG is ' +
   CONVERT(VARCHAR(30),size) + ' 8K pages or ' + 
   CONVERT(VARCHAR(30),(size*8/1024)) + 'MB'
FROM sysfiles 
WHERE name = @LogicalFileName;

PRINT @StringData
PRINT ''

DROP TABLE DummyTrans;
PRINT '*** Perform a full database backup ***'

SET NOCOUNT OFF

Tuesday, January 27, 2009

Tools for helping to back up databases



I find LiteSpeed the best backup tool by far for MSSQL products. Why, you might ask, do I need a backup tool when MSSQL has backup built in? Well, the main reason is that native MSSQL backup did not have compression until SQL Server 2008 added it. So if you are on SQL 2005 or below, I would recommend LiteSpeed.

Here is how to install LiteSpeed, and a small guide to using it to back up.

SQL Lite Speed Installation Guide

Installation Steps

1. Put in the CD and browse the CD
2. Launch the install by double clicking on the file litespeedforsqlserver32bit_45000157_1.msi

The screen below will be shown once the installer starts




Choose Next> to move to the next screen




Choose Next> to move to the next screen




Accept the License Agreement and choose Next> to move to the next screen


Enter the User Name and Organization as above. This is the default and should already be filled in. Ensure the application is installed for anyone who uses the computer and choose Next >




The default destination folder will be on the C: drive. This needs to be changed to reside on the E: drive.
Choose the Change… button


Enter the folder name as E:\Imceda\LiteSpeed\SQL Server\ and click OK

If the destination folder is defined correctly choose Next >



Select Complete setup and choose Next >





Click the Install button to begin the installation


Choose Next >



Select the “Use Same login details for all instances” and ensure Install is listed as the Install Action against all SQL Instances on the server.


We do not perform activity logging, so ensure that Activity Logging is not set up, and choose Next >



The following screen will summarise what will be installed. Choose Next >



The Installation has now completed. Choose Finish.

The Registration process will now be launched.



Choose Next >



Select “Register your copy of LiteSpeed 2005” and choose Next >



Select “Load Registration Key From File”. The file you have to load is called GlobalLiteSpeedLicenseKey.txt and is located in the same folder as the installation software.

Choose Next > to proceed




Once the Registration process has been completed, the installation is now complete.

To back up the database, use:

exec xp_backup_database
@database = 'database'
,@filename = '\path\backup\database.bak'


To restore a database with LiteSpeed, use the command below. This is also how to restore a backup if a native RESTORE gives you the error "this is not a valid backup file": LiteSpeed backups must be restored with the LiteSpeed procedures.

exec xp_restore_database
@database = 'database'
,@filename = '\path\backup\database.BAK'

or you could create a job to do this for you.
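
If you do want a job, here is a minimal sketch using the standard msdb job procedures (the job name and schedule are just illustrations; xp_backup_database is the LiteSpeed procedure shown above):

USE msdb
GO
-- create the job
EXEC dbo.sp_add_job @job_name = N'LiteSpeed nightly backup'
-- add a T-SQL step that calls the LiteSpeed backup procedure
EXEC dbo.sp_add_jobstep
      @job_name = N'LiteSpeed nightly backup'
    , @step_name = N'Backup database'
    , @subsystem = N'TSQL'
    , @command = N'exec xp_backup_database @database = ''database'', @filename = ''\path\backup\database.bak'''
-- run it every day at 01:00
EXEC dbo.sp_add_jobschedule
      @job_name = N'LiteSpeed nightly backup'
    , @name = N'Nightly at 01:00'
    , @freq_type = 4          -- daily
    , @freq_interval = 1
    , @active_start_time = 010000
-- target the local server
EXEC dbo.sp_add_jobserver @job_name = N'LiteSpeed nightly backup'
GO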

Thursday, January 8, 2009

Select Tutorial



There are many ways to select data from tables; I will go through them.

Counting Rows

COUNT(*) counts the number of rows in a table.

The syntax is:

select count(*) from table_name

Here is an example:

select count(*) from test;
+----------+
| count(*) |
+----------+
|      148 |
+----------+
1 row in set (0.00 sec)

This query shows the number of rows in the test table.

The most basic of queries is the select-everything query.

Now that you have learned how to insert data into the table, we should probably check that the data is stored correctly. To do so, we use the SELECT statement.

The Select syntax is

SELECT what_to_select FROM table_name;

To view all the data from the table, we use the below query.


select * from test;

OfficeCode  AccountCode  AccountName  SecType
1111        084534334    Test         1
1112        3434343435   3434         2
2 rows in set (0.00 sec)

The above example query will list the complete details of the test table. Here * selects all the columns from the table.

To limit your select, use:

select top 10 * from test

This will show the top 10 rows:

OfficeCode AccountCode AccountName
6 6670708 test
6 6610108 test2
6 6044208 test3
6 6045208 test4
6 6049208 test5
6 6046208 test6
6 6050208 test7
6 6047208 test8
6 6048208 test9
6 6041208 test10




Wednesday, January 7, 2009

Inserting into a table



Now that you have created your table, here is a basic insert statement to show you how to insert data.

INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 1, 21 )
go
INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 39, 21 )
go
INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 1998, 2001 )
go
INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 26, 2817 )
go
INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 21, 2938 )
go
INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 12, 21 )
go
INSERT INTO dbo.BRIAN ( a, b ) VALUES ( 3939, 4848 )
go

creating a table



Now that you know how to create a database, here is an example of creating a table.


USE [test]
GO
/****** Object: Table [dbo].[test] ******/
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE TABLE [dbo].[test](
    [id] [int] IDENTITY(1,1) NOT NULL,
    [tracename] [nvarchar](100) NULL,
    [enable] [int] NULL
) ON [PRIMARY]

If NULL is specified, the field is allowed to be left empty. If NOT NULL is specified, the field must be given a value. In the absence of either a NULL or NOT NULL, NULL is assumed.

The above query will create the table test with the fields id, tracename and enable.

PRIMARY KEY: A PRIMARY KEY is a field in a table that uniquely identifies a record. This attribute is used with the field name to create a primary key:

PRIMARY KEY(id)
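
For example, a minimal sketch of the same table with id as its primary key (test2 is just an illustrative name):

CREATE TABLE [dbo].[test2](
    [id] [int] IDENTITY(1,1) NOT NULL,
    [tracename] [nvarchar](100) NULL,
    [enable] [int] NULL,
    PRIMARY KEY ([id])
) ON [PRIMARY]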

What is a Database Administrator (DBA)?



There are two types of DBA: a Production DBA and a Software DBA. Software DBAs mainly help with designing databases and schemas, whereas a Production DBA helps keep a system tight: deciding what permissions a user has, backing up and maintaining the databases, and performing code releases.

The best way to become a DBA is to take the relevant classes and familiarize yourself with the specific, popular databases that are in demand. Becoming certified in a specific database program is a good way to show your skill level. Internships are often a great start to a Database Administrator career. Other technical support roles, such as System Administrators and Network Administrators, may easily transition into a Database Administration role by becoming certified as a Database Administrator.

What is SQL?

SQL stands for Structured Query Language and is used to communicate with databases; it is the standard language for relational database management systems (RDBMS). SQL statements are used for commands such as selects, updates and deletes; selects retrieve data from the database tables. The most common RDBMS systems are Sybase, Oracle, Microsoft SQL Server, MySQL, Access, PostgreSQL and Informix. Although most database systems use SQL, some also have their own additional language. The standard commands in SQL, as mentioned before, are "Select", "Insert", "Update", "Delete", "Create" and "Drop"; these commands can accomplish most tasks that one needs to do with a database.

Economic Downturn



I am now living in Japan, where I am a DBA for a contract company contracting to a bigger company.
With the downturn I am always worried about my job. I guess I am lucky as I still have mine; however, it is funny, as I am told there is always work for DBAs and that DBAs are still in demand.

I am upset that the government has not done anything to protect the people that are laid off, while bailing out the people that caused all the trouble.

What we can do during these times is try to make our skills count. Instead of worrying about our jobs and letting our work suffer, try to reduce the chance of being laid off by making yourself so valuable that you won't get cut. How, you might ask?
Firstly, I would say try to boost your DBA and IT skills. How, you might ask again, when my training budget has been cut?

Well, I would say use the internet; there are a number of resources out there.
Try sites like:
http://www.databasejournal.com
http://www.sqlservercentral.com
www.sybase.com
www.oracle.com
All these sites have good technical content, and Oracle and Sybase also have great online books free to download.

Start new projects, maybe a migration, or try new technologies. If you are doing database backups, try LiteSpeed; have a look at what else is out there. You may find free downloads you can practise with. Add them to your skill set, so if you do get laid off you can show these skills.

Try to find events; some of these have free training. I know SQL PASS and Code Camps do.

So, in other words, don't sit around moping; invest in yourself. This may cost nothing but your time, but you will reap the rewards in the end.

if you delete your transaction log



If you delete the transaction log by mistake, try this script.

This script should fix a database where the transaction log has been deleted or corrupted. You will have to do some of this manually, as explained below, and it will take a while depending on the size of the database.
/*
1. Stop the SQL service and rename mydb.mdf to mydbBad.mdf.
2. Start the SQL service and create a fake mydb database (with log etc).
3. Stop the SQL service.
4. Delete the new mydb.mdf.
5. Rename mydbBad.MDF to mydb.MDF.
6. Start the SQL service.
7. Run the following script:
*/
ALTER DATABASE mydb SET EMERGENCY
GO
EXEC sp_dboption 'mydb', 'single user', 'true'
GO
DBCC CHECKDB ('mydb', REPAIR_ALLOW_DATA_LOSS)
GO
EXEC sp_dboption 'mydb', 'single user', 'false'
GO
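
Afterwards you can sanity-check the state of the database (a quick sketch):

SELECT DATABASEPROPERTYEX('mydb', 'Status') -- should return ONLINE once the repair has finished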

scripting out logins from 2000 to 2005

SQL 2000 to SQL 2005: Where have all the old features gone?

http://www.sqlmag.com/Articles/ArticleID/16090/16090.html

Introduction: where have all the good features gone in SQL 2005?
As more DBAs across the planet begin using SQL 2005 Tools, but still manage SQL 2000 servers with them, I suspect there will be lots of muffled moaning and wondering where have all the good old features gone. Although Management Studio has some very nice long-awaited features, some of the good old stuff just isn’t there.
Where are my tried and true tools, such as taskpad? Where’s the IF EXISTS DROP option when I script out the stored procedures? Could someone pinch me and tell me this is just a bad dream?
The aspirin industry will profit enormously from that sort of thing.
To name a few good old pals that have all but disappeared into obscurity:
• Taskpad
• Ability to quickly script permissions on stored procedures
• Ability to quickly see disk space in database properties
• Time of creation of stored procedures
Sure, if you're connecting to an instance of SQL Server 2005 with Management Studio you get colorful reports and a plethora of professional-looking graphs at your disposal, but what about the majority of us who still have not migrated our servers to SQL 2005 but have already upgraded the tools?
The good news is this will tend to convert many GUI DBAs into hardened command-line pros, improve their typing skills, etc. In the next section I will show how to still take advantage of the old tools' functionality.
Taskpad functionality
I don't know about you all, but I really like the Taskpad and use it all the time. I am used to it like an old slipper; it fits my needs. And even if it did throw a vague error now and then, I forgive it now that it's gone… forever. But how can we get its functionality back?
The General tab in Database section is now in database properties under the same heading.
Maintenance section-like information can be found by querying the backupset table in msdb:
select max(backup_start_date) from backupset
where database_name = 'my_db'
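To see the last backup date for every database at once, a small variation on the same msdb table:
select database_name, max(backup_start_date) as last_backup
from msdb.dbo.backupset
group by database_name
order by database_name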
Note: Database options, Number of Users, Date Created and Owner can still be found in database properties in the SQL 2005 tools.
Space allocated section info can be found by running this T-SQL:
select * from sysfiles
or if you just need to find the space used by your log, execute:
DBCC SQLPERF (LOGSPACE)
Table Info tab
I don't use this one very often, but you can get similar functionality by running:
Exec sp_spaceused 'your_table_name'
To script multiple stored procedures including permissions:
Right-click the database -> Tasks -> Generate Scripts, and pick your database. Set Include Object Level Permissions to True. Note: If you set the Include IF NOT EXISTS option to true, the script will not create the stored procedure if it already exists on the target database.
Click Next, and select Stored Procedures only. Next, select which procs you want to script, review your final options and click Finish.
Unfortunately, if you want to drop/recreate the procedures if they exist on the target server, you will need to manually include the following script in the beginning of each procedure:
IF EXISTS (SELECT name FROM sysobjects
WHERE name = 'your_proc_name' AND type = 'P')
DROP PROCEDURE your_proc_name
GO
This one is truly beyond me; for reasons known only to Microsoft and the CEO of Bayer (or whoever the biggest headache drug company is these days), this option was excluded from the final SQL 2005 RTM.
Check disk space
If you're like I am, you're used to clicking on database properties, and then the ellipsis, in order to see the free disk space on the server. In SQL Server 2005 you can get this from a report, but until then you can run the undocumented extended stored procedure:
exec xp_fixeddrives
The result lists each fixed drive and the free space on it in MB.

Time of creation of stored procedures:
For some reason, the time part of the Create Date column in the Summary tab of SQL 2005 has been dropped. Why? I guess someone thought DBAs don't need it any longer (rolling eyes). The good news is you can still get this information by querying the sysobjects table, like:
Select crdate as DateCreated
From dbo.sysobjects
where name = 'your_proc_name'

Note: make sure NOT to specify the owner, as in dbo.your_proc_name, but simply the procedure name. The result is the procedure's creation date, including the time.

Summary
I've shown you here how to get by with the available SQL 2005 tools until you upgrade your servers to SQL 2005. People get used to their favorite ways to get the job done, and sometimes get "surprised" when their tools get taken away and they have to hammer the nail with the handle of a screwdriver. Hopefully the ways to attain the old functionality shown here will keep your productivity up, and hopefully the tools will continue to improve.


MSSQL How to transfer logins
http://support.microsoft.com/kb/246133
• You transfer logins and passwords from SQL Server 7.0 to SQL Server 2005.
• You transfer logins and passwords from SQL Server 2000 to SQL Server 2005.
• You assign logins to roles.
To transfer logins and passwords between different versions of SQL Server and then assign logins to roles, follow these steps:
1. Run the following script on the source SQL Server.
(This is the same sp_hexadecimal / sp_help_revlogin_2000_to_2005 script, including the final exec call, that is listed in full in the "script out logins for 2000 to 2005 servers" post above, so it is not repeated here.)
2. Save the output, and then paste and run the output in SQL Server Management Studio on the destination SQL Server 2005.
Note If the source SQL Server contains a login that has a blank password, the output contains a statement that resembles the following.
CREATE LOGIN LoginName WITH PASSWORD = '', CHECK_POLICY = OFF, SID = MySID
Remarks
• Review the output script carefully before you run it on the destination SQL Server. If you have to transfer logins to an instance of SQL Server in a different domain than the source instance of SQL Server, edit the script generated by the sp_help_revlogin procedure, and replace the domain name with the new domain in the sp_grantlogin statements. Because the integrated logins granted access in the new domain will not have the same SID as the logins in the original domain, the database users will be orphaned from these logins. To resolve these orphaned users, see the articles referenced in the following bullet item. If you transfer integrated logins between instances of SQL Servers in the same domain, the same SID is used and the user is not likely to be orphaned.
• After you move the logins, users may not have permissions to access databases that have also been moved. This problem is described as an "orphaned user". If you try to grant the login access to the database, it may fail indicating the user already exists:
Microsoft SQL-DMO (ODBC SQLState: 42000) Error 15023: User or role '%s' already exists in the current database.
For instructions about how to map the logins to the database users to resolve orphaned SQL Server logins and integrated logins, see the following article in the Microsoft Knowledge Base:
240872 (http://support.microsoft.com/kb/240872/) How to resolve permission issues when you move a database between servers that are running SQL Server
For instructions about using the sp_change_users_login stored procedure to correct the orphaned users one-by-one (this will only address users orphaned from standard SQL logins; a short example follows these remarks), see the following article in the Microsoft Knowledge Base:
274188 (http://support.microsoft.com/kb/274188/) "Troubleshooting Orphaned Users" topic in Books Online is incomplete
• If the transfer of logins and passwords is part of a move of databases to a new server running SQL Server, see the following article in the Microsoft Knowledge Base for a description of the workflow and steps involved:
314546 (http://support.microsoft.com/kb/314546/) How to move databases between computers that are running SQL Server
• You can do this because of the @encryptopt parameter in the sp_addlogin system stored procedure, that allows a login to be created by using the encrypted password. For more information about this procedure, see the "sp_addlogin (T-SQL)" topic in SQL Server Books Online.
• By default, only members of the sysadmin fixed server role can select from the sysxlogins table. Unless a member of the sysadmin role grants the necessary permissions, end users cannot create or run these stored procedures.
• This approach does not try to transfer the default database information for a particular login because the default database may not always exist on the destination server. To define the default database for a login, you can use the sp_defaultdb system stored procedure by passing it the login name and the default database as arguments. For more information about using this procedure, see the "sp_defaultdb" topic in SQL Server Books Online.
• During a transfer of logins between instances of SQL Server, if the sort order of the source server is case-insensitive and the sort order of the destination server is case-sensitive, you must enter all alphabetical characters in passwords as uppercase characters after the transfer of logins to the destination server. If the sort order of the source server is case-sensitive and the sort order of the destination server is case-insensitive, you will not be able to log in with the logins transferred using the procedure outlined in this article, unless the original password contains no alphabetical characters or unless all alphabetical characters in the original password are uppercase characters. If both servers are case-sensitive or both servers are case-insensitive, you will not experience this problem. This is a side effect of the way that SQL Server handles passwords. For more information, see the "Effect on Passwords of Changing Sort Orders" topic in SQL Server 7.0 Books Online.
• When you run the output from the sp_help_revlogin script on the destination server, if the destination server already has a login defined with the same name as one of the logins on the script output, you may see the following error upon execution of the output of the sp_help_revlogin script:
Server: Msg 15025, Level 16, State 1, Procedure sp_addlogin, Line 56
The login 'test1' already exists.
Likewise, if a different login exists with the same SID value on this server as the one you are trying to add, you receive the following error message:
Server: Msg 15433, Level 16, State 1, Procedure sp_addlogin, Line 93
Supplied parameter @sid is in use.
Therefore, you must carefully review the output from these commands, examine the contents of the sysxlogins table, and address these errors accordingly.
• The SID value for a particular login is used as the basis for implementing database level access in SQL Server. Therefore, if the same login has two different values for the SID at the database level (in two different databases on that server), the login will only have access to that database whose SID matches the value in syslogins for that login. Such a situation might occur if the two databases in question have been consolidated from two different servers. To resolve this problem, the login in question would have to be manually removed from the database that has a SID mismatch by using the sp_dropuser stored procedure, and then added again by using the sp_adduser stored procedure.
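
As mentioned in the orphaned-users remark above, a minimal sketch of fixing one orphaned user with sp_change_users_login (the user and login names are just illustrations):

-- report orphaned users in the current database
EXEC sp_change_users_login 'Report'
GO
-- remap one orphaned database user to the matching server login
EXEC sp_change_users_login 'Update_One', 'orphaned_user', 'matching_login'
GO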

logshipping

Log shipping:

Using the log shipping wizard
To use the log shipping wizard in SQL Server 2005, right click on your database and select Tasks and Ship Transaction Logs as illustrated in Figure 1.
Figure 1: Accessing the log shipping wizard. (Click on image for enlarged view.)
You may get the dialog you see displayed in Figure 2. SQL Server is telling you that this database is not in the full or bulk-logged recovery model.
Figure 2: Dialog displayed if your database is in the simple recovery model. (Click on image for enlarged view.)
To change the recovery model for your database, right click on your database, select properties and in the Options tab, click on the Recovery Model drop down list box and select Full or Bulk logged recovery model. This is illustrated in Figure 3. Then click OK.
Note: If you do select these recovery models and you back up your database or have already backed up the database, you will need to maintain your transaction logs by scheduling a backup of them. The log shipping wizard will configure this for you.
Figure 3: Changing the recovery model for your database. (Click on image for enlarged view.)
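If you prefer T-SQL to the properties dialog, the same change is a single statement (MyDB is just an illustrative name):
ALTER DATABASE [MyDB] SET RECOVERY FULL
GO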
Once you've set the correct recovery model for your database, right click on your database and select Ship Transaction Logs again. You will receive the dialog box illustrated in Figure 4.
Figure 4: Configuring log shipping. (Click on image for enlarged view.)
Note: In this figure, I selected the check box that enables this as a primary database in a log shipping configuration. Make sure you select this check box as well. Clicking on the Backup Settings button will display the Transaction Log Backup Settings as displayed in Figure 5.
Figure 5: Transaction Log Backup Settings and options. (Click on image for enlarged view.)
There are two options to backup the transaction log to:
Network path
Local path
If you are backing up to a network path, there will be some increase in the transaction log backup times and you will decrease the space requirements on your primary server. The network path should go to the location where you want to store the transaction log backups on the secondary server. Most DBAs use a network share to back up the files to because they want the transaction log backup files on the secondary server, to protect them in the event that the primary server goes down.
The local path option will back up the transaction logs to a path on your primary server. Make sure the transaction log backups are not backed up to the same physical drive where your database data files or log files exist. Should they be backed up to the same physical drive, it would cause I/O contention and overall SQL Server performance degradation.
Figure 6 illustrates a completed Transaction Log Backup Settings dialog. Note that the share name has a dollar sign after it. This has the effect of hiding the share from users, and only users who know the share name will be able to access it. This is considered a good security practice.
Figure 6: Transaction Log Backup Settings illustrating a hidden share for the network path. (Click on image for enlarged view.)
There is an option for how long to retain your transaction log backups and an option for the threshold to raise an alert if there is no backup within a specific period. I've found the default for transaction log backup retention of three days to be adequate. I normally set a much smaller alert, like 20 minutes, but while a database backup is occurring, no log backups can occur, so if your database backups take more than an hour, you will get frequent alerts. Set this to a value that works for you. And remember: Too frequent alerts will result in your ignoring critical ones.
Once you have set the transaction log backup location, the retention period, the alerting threshold and the schedule, click OK. You will return to the Database Properties dialog box as illustrated in Figure 4. Click on the Next button to display the secondary Database Settings dialog displayed in Figure 7.
Figure 7: The secondary database settings. (Click on image for enlarged view.)
In this dialog box, you can select the secondary server (the server you'll be copying the transaction logs to) as well as the secondary database (the name of the database you will be log shipping to). Notice these options for Initialize Secondary Database:
Yes, generate a full backup of the primary and restore it into the secondary database. This option creates a backup of the database you wish to log ship and restores it on the secondary server.
Yes, restore an existing backup of the primary database. Use this option if you want to use a previously backed up database. There is an option to select the path and backup name.
No, the secondary database is initialized. Use this option if:
A copy of the database you wish to log ship has already been restored on the secondary.
The primary database is already in the full or bulk logged recovery model.
No transaction log backups have been done on the primary database since the backup was done, or if they have been done, they have been restored on the secondary.
The secondary database has been restored with the no-recovery option.
Once you have configured the options that work best for you, click on the Copy Files tab, to display the dialog, as illustrated in Figure 8.
Figure 8: Copy Files dialog. (Click on image for enlarged view.)
In the Copy Files tab, select where the transaction log files are copied from the primary to the secondary. If you selected a network path in the Transaction Log Backup Settings (as illustrated in Figure 5), the network path should map to a physical location on your secondary server, and you should enter that path here (i.e., if \\ServerName\ShareName$ is the share name for the local path C:\Backup, enter this path here).
You can also use a network path where the transaction log backups are stored on the primary server. Select how long you wish to retain the transaction log backups, understanding, of course, that the retention period might conflict with what you set in the Transaction Log Backup Settings dialog box (Figure 5).
You can also select how frequently you want the logs to be copied to the secondary server.
Once you have configured the copying of the transaction log backup files, click on the Restore Transaction Log tab as illustrated in Figure 9.
Figure 9: Restore Transaction Log tab. (Click on image for enlarged view.)
The database recovery state options are:
No Recovery Mode – This is the default option. In this option, the destination database will be inaccessible.
Standby Mode – In this option, the destination database will be read-only. This allows read-only access, but users will not be able to make any changes to the database (i.e., create indexes), and they will be disconnected whenever the next transaction log backup is applied.
There is also an option to delay restoring the transaction log backups by a set number of hours or minutes. Some enterprises like to keep their standby server several hours out of sync from their source server.
By default, if the transaction logs are not restored within the interval defined in the "Alert if no restore occurs within" setting, an alert will be raised. Most frequently this alert is raised during a backup operation on the primary.
You also have an option to set how frequently the transaction log restores will take place. These settings are made in the Restore job dialog.
Once you make those settings, click OK and you'll see the Transaction Log Shipping dialog of the Database Properties dialog box (as illustrated in Figure 10).
Figure 10: Transaction Log Shipping tab. (Click on image for enlarged view.)
In this dialog, notice how we have configured server F as our secondary server and that database p2p3 is the database we are log shipping the Northwind database to. You can also log ship to a second secondary/standby server – this could be another secondary server in your DR site.
In any enterprise with a large number of log ship databases, you may want to create a monitor server. To create a monitor server, select "Use a monitor server instance" (Figure 10) and click on the settings button to configure a Log Shipping Monitor server. That option is illustrated in Figure 11.
Figure 11: Create a monitor server. (Click on image for enlarged view.)
Using the Connect button, connect to the server you wish to use as your monitor. Define how to make the connection, either through Windows authentication or through a SQL Server login. You can define the job history retention (the default is normally a good choice), and then define how you want to send the alerts. The default option (Start Automatically when SQL Server Agent starts) is the best choice, as alerts will be near real time. You can alternatively select an hourly schedule or whatever time interval you choose.
Summary
This completes our look at using the log shipping dialog in SQL Server 2005. This wizard has many options and can be quite bewildering to the new user, but the defaults are generally optimal in most cases and I've noted the cases where it's best to use non-default settings.

A post for the beginners: how to create a database?

Creating a Database


This is a very simple task; however, you should ask yourself a few questions before doing it.


How much space do we need?


How many concurrent users do we expect on the server?


Always plan for more users than you think will use the database, to allow for expansion.


What is the DB going to be used for?


I.e. office, intranet, internet.

If it's going to be used for the internet or intranet, we need to think about security, because we'll need to utilize predefined zones within the firewall.


Do we need to configure IPSEC or SSL?

Do we know when we can do maintenance, especially if the DB needs to be up all the time?

How will we back up the database?

Creating a db with SQL Management Studio

Select Databases, then right-click and click create.

You will now see this screen after you click create:



When creating a database, one rule you should always follow is that the name should not contain spaces. Putting spaces in the name requires surrounding it with brackets, [My Database], in T-SQL to avoid syntax errors, and I prefer not to have to use the brackets.

I follow that by almost always setting the owner of the database to SA. In most cases on systems I administer, changes will always be made by someone in the sysadmin group; rarely we might put someone into the db_owner role to let them make whatever changes are needed. The only value derived from being the database owner rather than a member of db_owner is that the owner is also dbo, which means that if they don't qualify objects with a schema when creating them, the objects will be owned by dbo by default. Members of db_owner can use two-part syntax to put objects into the dbo schema (create table dbo.employees...). Note that you can always change the database owner later using sp_changedbowner.
Full text indexing isn't commonly used, but we can activate it now if we know it will be needed, or it can be activated later.
As you can see in the next image, I'm going to call my database Test, and it has automatically generated the logical names for the two files that get created by default (one MDF and one LDF). I never change these names as the generated ones work fine. Moving to the right, I've taken a guess at my database usage and increased the database size to 100 MB and the log size to 10 MB. It's just a guess though, so why bother? Mainly to avoid fragmentation by allocating a good chunk of space at once.


Once you have entered the database name, you will need to change the default values for autogrowth. To do this, click on the three dots next to Autogrowth (it will look like "By 1 MB, unrestricted growth"). You will then see this screen:

I rarely turn autogrow off, and contrary to what many recommend I typically let the database grow if and when it's needed, not deliberately resizing the database to maintain a given amount of free space. The advantage to doing it deliberately is that you avoid the performance hit of waiting for the file to grow if it happens during production hours, but that penalty has been greatly reduced in SQL 2005 with instant initialization, a feature change that causes SQL to initialize pages on first write instead of during the auto grow. The advantage of doing it my way is less time spent on a very mundane task. The more important decision is whether to grow in percent or megabytes, and how much. There's no right answer, but in general we want to grow in as large a chunk as makes sense to avoid physical fragmentation on the drive. For this example I'm going to set both to grow at 10%, and leave file growth unrestricted.

Click on the button next to Path and change the path to where you would like the data and log to go, then click OK. This will create the database. You can also change the name of the file here if you like.

Here is also the T-SQL code to create this database:

CREATE DATABASE [SCD] ON PRIMARY
( NAME = N'SCD', FILENAME = N'G:\MSSQL2K5\MSSQL.1\MSSQL\DATA\SCD.mdf', SIZE = 102400KB, FILEGROWTH = 1024KB )
LOG ON
( NAME = N'SCD_log', FILENAME = N'G:\MSSQL2K5\MSSQL.1\MSSQL\DATA\SCD_log.ldf', SIZE = 10240KB, FILEGROWTH = 10% )
GO
EXEC dbo.sp_dbcmptlevel @dbname=N'SCD', @new_cmptlevel=90
GO
ALTER DATABASE [SCD] SET ANSI_NULL_DEFAULT OFF
GO
ALTER DATABASE [SCD] SET ANSI_NULLS OFF
GO
ALTER DATABASE [SCD] SET ANSI_PADDING OFF
GO
ALTER DATABASE [SCD] SET ANSI_WARNINGS OFF
GO
ALTER DATABASE [SCD] SET ARITHABORT OFF
GO
ALTER DATABASE [SCD] SET AUTO_CLOSE OFF
GO
ALTER DATABASE [SCD] SET AUTO_CREATE_STATISTICS ON
GO
ALTER DATABASE [SCD] SET AUTO_SHRINK OFF
GO
ALTER DATABASE [SCD] SET AUTO_UPDATE_STATISTICS ON
GO
ALTER DATABASE [SCD] SET CURSOR_CLOSE_ON_COMMIT OFF
GO
ALTER DATABASE [SCD] SET CURSOR_DEFAULT GLOBAL
GO
ALTER DATABASE [SCD] SET CONCAT_NULL_YIELDS_NULL OFF
GO
ALTER DATABASE [SCD] SET NUMERIC_ROUNDABORT OFF
GO
ALTER DATABASE [SCD] SET QUOTED_IDENTIFIER OFF
GO
ALTER DATABASE [SCD] SET RECURSIVE_TRIGGERS OFF
GO
ALTER DATABASE [SCD] SET AUTO_UPDATE_STATISTICS_ASYNC OFF
GO
ALTER DATABASE [SCD] SET DATE_CORRELATION_OPTIMIZATION OFF
GO
ALTER DATABASE [SCD] SET PARAMETERIZATION SIMPLE
GO
ALTER DATABASE [SCD] SET READ_WRITE
GO
ALTER DATABASE [SCD] SET RECOVERY FULL
GO
ALTER DATABASE [SCD] SET MULTI_USER
GO
ALTER DATABASE [SCD] SET PAGE_VERIFY CHECKSUM
GO
USE [SCD]
GO
IF NOT EXISTS (SELECT name FROM sys.filegroups WHERE is_default=1 AND name = N'PRIMARY')
    ALTER DATABASE [SCD] MODIFY FILEGROUP [PRIMARY] DEFAULT
GO
EXEC sp_changedbowner 'sa'
GO

What to do if your transaction log fills up



In MSSQL, with the default autogrow settings, your tran log will only actually be full once the hard drive it lives on is full.

Before I show you how to shrink the tran log, I will explain a little about it and what the tran log is for.

The transaction log is probably the most underutilized and underappreciated part of SQL. This is probably due to its simplicity.

The transaction log can actually help to lower the server's resource usage and increase productivity, all the while providing an additional safety layer for you to fall back on!

Why does the tran log exist?

The history of the existence of the transaction log is difficult to trace, and is to a small degree controversial because certain aspects are based on other applications. An example is that the method of using transaction logs to restore databases is primarily based on proven methods used in Microsoft Access, which in turn is based on other application(s) and standards. This means there is no single document that states the history of transaction logs, and most current documentation is partially based on personal experience, knowledge, and/or interpretations.
I’ll make no attempt to determine the exact history of transaction logs. I just want to point out that transaction logs are largely based on Microsoft Access methods to store information for transactions. This is important to know because it has become a common misunderstanding that using the TRUNCATE command should not only remove old transaction information, but also should SHRINK the file down. This is absolutely, without a doubt…WRONG! I’ll get into this more as this article progresses. First, to be able to understand why TRUNCATE will not make the log smaller in physical size you will need to understand how the transactions are inserted into the log; both physically and logically.
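A quick way to prove this to yourself on a test server is to watch DBCC SQLPERF(LOGSPACE) before and after a truncation. This sketch uses my SCD example database and the old SQL 2000/2005 TRUNCATE_ONLY syntax (which was removed in SQL 2008):

-- physical log size in MB and the percent of it actually in use
DBCC SQLPERF(LOGSPACE)
GO
-- throw away the inactive log records without backing them up
BACKUP LOG [SCD] WITH TRUNCATE_ONLY
GO
-- Log Space Used (%) drops, but Log Size (MB) stays exactly the same
DBCC SQLPERF(LOGSPACE)
GO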
The primary purpose of the transaction log is to provide a method to restore a database to a point in time when necessary. This can include rolling back transactions to a certain time, or rolling forward transactions from a full backup restoration.
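For instance, a point-in-time restore looks something like this; the backup file paths and the STOPAT time here are made up for the example:

RESTORE DATABASE [SCD] FROM DISK = N'G:\Backups\SCD_full.bak' WITH NORECOVERY
GO
RESTORE LOG [SCD] FROM DISK = N'G:\Backups\SCD_log.trn'
WITH STOPAT = '2009-06-07 14:30:00', RECOVERY
GO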
In recent years the transaction log has started to be used for additional purposes. Transaction logs are being used for mirroring databases, quickly importing data from old database versions into a new database version, and security audits. This is just the tip of the iceberg for the ways these logs are being utilized to gain useful information. The last point, security audits, is a relatively new and (officially, at least) undocumented use of transaction logs. Information stored within transaction logs can be very powerful, but also tricky to learn at first while the structure and the information being stored are new to you. If you take your time to learn what you can get out of the transaction log, you can produce very useful information that can help to increase your productivity (especially in the security audits area)!
An example of how to use these logs for security audits: you can view a transaction log and detect transactions that have occurred and who (what user or system account) produced each transaction (this is no trivial task to accomplish). If you know your database very well, you can quickly deduce whether someone is accessing parts of the database they shouldn't; or even worse, whether an unauthorized user has accessed your database! The techniques for using transaction logs for security audits go beyond the scope of this article; the preceding is intended to provide a broader understanding of methods being used to obtain useful information. You can search sites such as MSDN, SQLServerCentral.com, and Google for discussions and articles detailing many different ways to audit the logs and obtain additional useful information. It is also worthwhile to note that there are 3rd party applications that utilize the transaction logs for security auditing, data movement, and many other purposes. As always, evaluate any 3rd party application when possible, and test on TEST servers before ever using new techniques or applications in a production environment.

How the Transaction Log Works
As mentioned in the “Logical Architecture (Concept)” portion, transactions are recorded into the log in a sequential manner. The LSN for each transaction will have a value that is higher than that of the previously written transaction. It’s actually just that simple; no complicated mathematical methods are used. There is no clever programming to add new transactions. It’s literally the same as taking a piece of paper and writing in each event that has happened in your day, using an ID number starting with 1 and increasing by 1 for each event that occurs, to record the order of the sequence. And just like your day, the next event that occurs must happen after the previous event; thus you’d give the next event a higher ID number than the previous. It’s safe to say that the following events have happened in this order for you to read this article:
1) You told your computer to go to the link, or to open a document, containing this article.
2) Your device displayed the article on your display, or printed the article to another format to be viewed.
3) You started reading this article.
As with these events, you can’t have the 2nd and 3rd events occur without the first event obtaining the article, and you can’t read the article unless you view/print it (thus #3 can’t occur without #2 occurring first). As you can see, you can’t have these steps logged out of order and still recreate the process; so each step has a higher ID number than the previous step.
A transaction log is exactly that in concept: a journal of events that occur within your database. The main points to remember here are that each event is given an LSN that is higher than the previous LSN (to keep track of which event happened in what sequence), and that enough information is stored so that SQL Server can recreate the event should a restore of the database be required.
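If you want to see these sequential LSNs with your own eyes, the undocumented fn_dblog function will show you the journal for the current database. It is unsupported, so only poke at it on a test server; the column names shown are what it returns on SQL 2005:

SELECT TOP 10 [Current LSN], Operation, [Transaction ID]
FROM ::fn_dblog(NULL, NULL)
GO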

Microsoft Recommendations
The transaction log is recommended, by Microsoft, to be located on a mirrored drive in a fault-tolerant storage system. It is also recommended, by many experts, to ensure that the log is located on its own physical hard drive. These two recommendations are based on the idea that, should the database (or the hard drive holding the database) become corrupt, you can still be sure you have access to the transaction log during your recovery operation. This setup also helps to minimize the impact on database performance from the constant writing to the transaction log; obviously, having one hard drive write to the database and then write to the transaction log requires it to physically write twice. If the database and the log are on separate drives, each drive only has to write once, thus improving your performance.
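If you need to move an existing log file onto its own drive, here is a rough sketch of the SQL 2005 approach. The H:\SQLLogs path is made up for the example, and the database is unavailable while you move the physical file:

ALTER DATABASE [SCD] SET OFFLINE WITH ROLLBACK IMMEDIATE
GO
-- copy SCD_log.ldf to the new drive in Windows, then repoint SQL at it:
ALTER DATABASE [SCD] MODIFY FILE ( NAME = N'SCD_log', FILENAME = N'H:\SQLLogs\SCD_log.ldf' )
GO
ALTER DATABASE [SCD] SET ONLINE
GO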

My recommendations

I would recommend that you create a database and run a lot of tests with large and small amounts of data to mimic a real-life situation.
I’d recommend starting off with a modest amount of space for your transaction log (ranging from 5% to 10% of the database size; for example, a 5 GB to 10 GB log for a 100 GB database). The objective is to get a picture of how big the transaction log will need to be under regular usage. Assuming there are no drastically unusual amounts of data being passed through the system, after about 3 full backups (using your regularly scheduled intervals) without shrinking the log file, you can feel fairly confident that you know the size the transaction log needs to be.

It is common to take the log size produced after the database has had enough regular usage data passed through it and add another 10–20% of the transaction log size for small abnormal transactions and maintenance; such as a small number of records being archived (if archiving is done on a regular basis, such as monthly or quarterly), additional tables being added later to hold new data requirements, or reports being created and stored within the database. These are all concerns that are best addressed early in the development cycle; however, if the transaction log needs to be increased later, that can always be accomplished by the administrator (or by SQL Server if left at the defaults).
Keep track of the log file size throughout the life span of the database; if you notice it grow suddenly, look into the cause. The highest concern would be a massive amount of data being modified (either from archiving, or even worse, deleting); if either of these is the case, make sure that the transaction log is not truncated. You may find that you need it to roll back that large transaction if it occurred at a non-planned time (such as a misuse of the DELETE or DROP commands by a database user). This is where the transaction log can literally save your job!
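One easy way to keep track is a quick query against sys.database_files that you can run (or schedule) in each database; size is stored in 8 KB pages, so dividing by 128 gives MB:

USE [SCD]
GO
-- current physical size of the log file in MB
SELECT name, size/128 AS size_mb
FROM sys.database_files
WHERE type_desc = 'LOG'
GO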
The last and probably most helpful recommendation I can give is to take a proactive approach to sizing your database’s transaction log file. Be generous in the amount of space you reserve for the transaction log. Most importantly, if you expect (or even suspect) that the log will need to be increased, do it as soon as possible during off-peak hours; this will have the least impact on your system and will result in optimal performance (while avoiding any unexpected performance degradation that might occur during peak hours!). Remember that the fewer resources you spend on maintenance, the more resources can be given to your queries and reports!
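Growing the log ahead of time is a one-liner; the 2048 MB target here is just an example number (it must be larger than the file's current size):

ALTER DATABASE [SCD] MODIFY FILE ( NAME = N'SCD_log', SIZE = 2048MB )
GO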

OK, so now you are thinking: how do I shrink the log down? Here is how you do it.

First, you will need to kill the processes that are attached to the db.

The process for clearing this is as follows (a worked example follows the steps):

1) DBCC OPENTRAN ('database_name') - this will show the oldest open transaction and its SPID

2) KILL the SPID reported by OPENTRAN

3) BACKUP LOG database_name WITH NO_LOG

4) USE database_name

5) DBCC SHRINKFILE ('log_logical_file_name', 100) - shrinks the log back to 100 MB. Note that SHRINKFILE takes the logical name of the log file (from sys.database_files), not the database name.
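Putting the steps together against my SCD example database, it looks like this. The SPID is a placeholder for whatever OPENTRAN reports, and SCD_log is the logical log file name from the CREATE DATABASE script earlier:

DBCC OPENTRAN ('SCD')            -- shows the oldest active transaction and its SPID
GO
KILL 53                          -- placeholder: use the SPID OPENTRAN reported
GO
BACKUP LOG [SCD] WITH NO_LOG     -- clears the inactive log records (SQL 2000/2005 only)
GO
USE [SCD]
GO
DBCC SHRINKFILE ('SCD_log', 100) -- shrink the log file back to 100 MB
GO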

Additional resources if needed:
TechNet: Working with Transaction Log Backups - http://technet.microsoft.com/en-us/library/ms190440.aspx
TechNet: Restoring a Database to a Point Within a Backup - http://technet.microsoft.com/en-us/library/ms190244.aspx
TechNet: Recovering to a Log Sequence Number (LSN) - http://technet.microsoft.com/en-us/library/ms191459.aspx
TechNet: Best Practices for Recovering a Database to a Specific Recovery Point - http://technet.microsoft.com/en-us/library/ms191468.aspx
TechNet: Security Considerations for Backup and Restore - http://technet.microsoft.com/en-us/library/ms190964.aspx
MSDN: DBCC SHRINKFILE (Transact-SQL) - http://msdn.microsoft.com/en-us/library/ms189493.aspx
MSDN: DBCC SQLPERF (Transact-SQL) - http://msdn.microsoft.com/en-us/library/ms189768.aspx