How to Back Up your MySQL Database on a Bitnami WordPress Site

I recently managed to explode my WordPress site (whilst trying to upgrade PHP). Luckily I had created an AMI a month earlier – but I had written a few articles since then and wanted to avoid rewriting them. So below is a method to back up your WordPress MySQL database to S3 and restore it onto a new WordPress server. Note: I actually mounted the corrupt instance as a volume and did this the long way around.

Step 1: Create an S3 bucket to store the backup

$ aws s3api create-bucket \
>     --bucket andrewbakerninjabackupdb \
>     --region af-south-1 \
>     --create-bucket-configuration LocationConstraint=af-south-1
Unable to locate credentials. You can configure credentials by running "aws configure".
$ aws configure
AWS Access Key ID [None]: XXXXX
AWS Secret Access Key [None]: XXXX
Default region name [None]: af-south-1
Default output format [None]: 
$ aws s3api create-bucket     --bucket andrewbakerninjabackupdb     --region af-south-1     --create-bucket-configuration LocationConstraint=af-south-1
{
    "Location": "http://andrewbakerninjabackupdb.s3.amazonaws.com/"
}
$ 

Note: To get your API credentials, go to IAM, select the Users tab and then select Create Access Key
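
If you prefer to stay on the command line, you can also create a key pair for an existing IAM user with the CLI (the user name below is just a placeholder):

# Create an access key for an existing IAM user (replace the user name with your own)
aws iam create-access-key --user-name my-backup-user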

Step 2: Create a backup of your MySQL database and copy it to S3

For full backups use the script below (note: this won't be restorable across MySQL versions, as it includes the system “mysql” db)

# Check mysql is installed and its version (note you cannot restore across versions)
mysql --version
# First get your mysql credentials
sudo cat /home/bitnami/bitnami_credentials
Welcome to the Bitnami WordPress Stack

******************************************************************************
The default username and password is XXXXXXX.
******************************************************************************

You can also use this password to access the databases and any other component the stack includes.

# Now create a backup using this password
$ mysqldump -A -u root -p > backupajb.sql
Enter password: 
$ ls -ltr
total 3560
lrwxrwxrwx 1 bitnami bitnami      17 Jun 15  2020 apps -> /opt/bitnami/apps
lrwxrwxrwx 1 bitnami bitnami      27 Jun 15  2020 htdocs -> /opt/bitnami/apache2/htdocs
lrwxrwxrwx 1 bitnami bitnami      12 Jun 15  2020 stack -> /opt/bitnami
-rw------- 1 bitnami bitnami      13 Nov 18  2020 bitnami_application_password
-r-------- 1 bitnami bitnami     424 Aug 25 14:08 bitnami_credentials
-rw-r--r-- 1 bitnami bitnami 3635504 Aug 26 07:24 backupajb.sql

# Next copy the file to your S3 bucket
$ aws s3 cp backupajb.sql s3://andrewbakerninjabackupdb
upload: ./backupajb.sql to s3://andrewbakerninjabackupdb/backupajb.sql
# Check the file is there
$ aws s3 ls s3://andrewbakerninjabackupdb
2022-08-26 07:27:09    3635504 backupajb.sql
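
Optionally, if your dump is large you can compress it before uploading; something like the below should work (file names are just examples):

# Optional: compress the dump before uploading (-k keeps the original file)
gzip -k backupajb.sql
aws s3 cp backupajb.sql.gz s3://andrewbakerninjabackupdb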

OR, for partial backups, follow the below to back up just the bitnami_wordpress database:

# Login to database
mysql -u root -p
show databases;
+--------------------+
| Database           |
+--------------------+
| bitnami_wordpress  |
| information_schema |
| mysql              |
| performance_schema |
| sys                |
+--------------------+
exit
$ mysqldump -u root -p --databases bitnami_wordpress > backupajblight.sql
Enter password: 
$ ls -ltr
total 3560
lrwxrwxrwx 1 bitnami bitnami      17 Jun 15  2020 apps -> /opt/bitnami/apps
lrwxrwxrwx 1 bitnami bitnami      27 Jun 15  2020 htdocs -> /opt/bitnami/apache2/htdocs
lrwxrwxrwx 1 bitnami bitnami      12 Jun 15  2020 stack -> /opt/bitnami
-rw------- 1 bitnami bitnami      13 Nov 18  2020 bitnami_application_password
-r-------- 1 bitnami bitnami     424 Aug 25 14:08 bitnami_credentials
-rw-r--r-- 1 bitnami bitnami 2635204 Aug 26 07:24 backupajblight.sql
# Next copy the file to your S3 bucket
$ aws s3 cp backupajblight.sql s3://andrewbakerninjabackupdb
upload: ./backupajblight.sql to s3://andrewbakerninjabackupdb/backupajblight.sql
# Check the file is there
$ aws s3 ls s3://andrewbakerninjabackupdb
2022-08-26 07:27:09    2635204 backupajblight.sql

Step 3: Restore the file on your new wordpress server

Note: If you need the password, use the cat command from Step 2.

# Copy the file down from S3
$ aws s3 cp s3://andrewbakerninjabackupdb/backupajbcron.sql restoreajb.sql --region af-south-1
# Restore the db
$ mysql -u root -p < restoreajb.sql
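
Depending on what you restored, it can be worth restarting the Bitnami stack so WordPress picks up the restored database. A minimal sketch, assuming the standard Bitnami control script location:

# Restart the Bitnami services (Apache, MySQL, PHP-FPM) after the restore
sudo /opt/bitnami/ctlscript.sh restart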

Step 4: Optional – Automate the Backups using Cron and S3 Versioning

This part is unnecessary (and one could credibly argue that AWS Backup is the way to go – but I am not a fan of its clunky UI). Below I enable S3 versioning and create a cron job to back up the database every week. I will also set the S3 lifecycle policy to delete anything older than 90 days.

# Enable bucket versioning
aws s3api put-bucket-versioning --bucket andrewbakerninjabackupdb --versioning-configuration Status=Enabled
# Now set the bucket lifecycle policy
nano lifecycle.json 

Now paste the following policy into nano and save it (as lifecycle.json):

{
    "Rules": [
        {
            "Prefix": "",
            "Status": "Enabled",
            "Expiration": {
                "Days": 90
            },
            "ID": "NinetyDays"
        }
    ]
}

Next add the lifecycle policy to delete anything older than 90 days (as per above policy):

aws s3api put-bucket-lifecycle --bucket andrewbakerninjabackupdb --lifecycle-configuration file://lifecycle.json
## View the policy
aws s3api get-bucket-lifecycle-configuration --bucket andrewbakerninjabackupdb
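
You can also confirm that versioning was actually switched on:

# Confirm versioning is enabled (should return "Status": "Enabled")
aws s3api get-bucket-versioning --bucket andrewbakerninjabackupdb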

Now add a cron job to run every week:

## List the cron jobs
crontab -l
## Edit the cron jobs
crontab -e
## Enter these lines.
## Back up in the early hours of Saturday morning and copy to S3 a couple of hours later (cron format: min hour day month weekday (Sunday is day zero))
1 0 * * SAT /opt/bitnami/mysql/bin/mysqldump -A -uroot -pPASSWORD > backupajbcron.sql
1 2 * * SAT /opt/bitnami/mysql/bin/mysqldump -u root -pPASSWORD --databases bitnami_wordpress > backupajbcronlight.sql
0 3 * * SAT aws s3 cp backupajbcron.sql s3://andrewbakerninjabackupdb
0 4 * * SAT aws s3 cp backupajbcronlight.sql s3://andrewbakerninjabackupdb
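
As an alternative sketch (I haven't run this one myself, so treat it as an assumption), you could skip the intermediate file entirely and pipe the dump straight to S3, since aws s3 cp can read from stdin via "-":

## Alternative: dump straight to S3 without a local file (password/bucket are examples)
1 0 * * SAT /opt/bitnami/mysql/bin/mysqldump -A -uroot -pPASSWORD | aws s3 cp - s3://andrewbakerninjabackupdb/backupajbcron.sql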

How to Fix SSH error: WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED

If the fingerprint of your remote host changes, you will see the following error message:

 ~ % ssh -i "mykey.pem" user_id@host.af-south-1.compute.amazonaws.com
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
Someone could be eavesdropping on you right now (man-in-the-middle attack)!
It is also possible that a host key has just been changed.
The fingerprint for the ED25519 key sent by the remote host is
SHA256:S60CvpE17ri+E594StxXBQcNIrga4Nb7uX4s7BPr3dw.
Please contact your system administrator.
Add correct host key in /Users/user_id/.ssh/known_hosts to get rid of this message.
Offending ED25519 key in /Users/user_id/.ssh/known_hosts:2
Host key for my_host.af-south-1.compute.amazonaws.com has changed and you have requested strict checking.
Host key verification failed.

There are many ways to fix this; the easiest is simply to delete your “known_hosts” file. This means you will need to re-accept the fingerprints of all your SSH hosts. Yes, this is very lazy…

rm ~/.ssh/known_hosts
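
A less destructive alternative is to remove only the offending host entry and leave the rest of your known_hosts intact:

# Remove just the stale key for the one host (keeps all other entries)
ssh-keygen -R host.af-south-1.compute.amazonaws.com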

MacBook: How to get your Mac to behave like MS Windows and restore minimised windows when using Command + Tab (Alt + Tab)

If you like to maximise or minimise your windows on a Mac, you will likely be frustrated by the default behaviour of your MacBook (it doesn’t restore/focus minimised or maximised windows). Below are a few steps to make window restores on your Mac behave like Microsoft Windows:

Install Homebrew (if you don’t have it):

## Install homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
## IMPORTANT: Once the install finishes run the two commands displayed in the terminal window
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> $HOME/.zprofile
eval "$(/opt/homebrew/bin/brew shellenv)"

Install AltTab:

brew install --cask alt-tab

Next run the AltTab application (click the magnifying glass / Spotlight search icon in the top right of your MacBook, near the clock, and type “AltTab”). When it starts up it will ask you to grant it access to the various system accessibility functions (i.e. window previews). If you don’t adjust the settings you will need to switch from using “Command + Tab” to using “Option + Tab”, or read the note below to adjust the settings…

Note: I recommend the following tweaks…

If you want to use the default Windows-style tab keystrokes, you will need to change the “Controls” tab setting called “Hold” from “Option” to “Command” as per below:

Next, go to the Appearance tab and change the Theme to “Windows 10” (as it’s hard to see the focused window with the Mac style):

Note: detailed documents on AltTab can be found here: https://alt-tab-macos.netlify.app/

How to Automatically Turn your bluetooth off and on when you open and close your MacBook

If you’re like me, little things bother you. When I turn on my bluetooth headset and it connects to my MacBook while it’s closed/sleeping, I get very frustrated. So I wrote a simple script to fix this behaviour. After running the script below, closing the lid on your MacBook will automatically turn bluetooth off, and opening your MacBook will automatically re-enable bluetooth. Simple 🤓

If you need to install brew/homebrew on your mac then run this:

## Install homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
## IMPORTANT: Once the install finishes run the two commands displayed in the terminal window
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> $HOME/.zprofile
eval "$(/opt/homebrew/bin/brew shellenv)"

Script to automatically enable/disable bluetooth:

## Install the bluetooth util and sleepwatcher
brew install sleepwatcher blueutil
## This creates a file which switches bluetooth off when the macbook lid is closed
echo "$(which blueutil) -p 0" > ~/.sleep
## This creates a file which switches on bluetooth when the lid is open
echo "$(which blueutil) -p 1" > ~/.wakeup
## This makes both files runnable
chmod 755 ~/.sleep ~/.wakeup
## Finally restart the sleepwatcher service (to pickup the new files)
brew services restart sleepwatcher
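
To sanity-check the setup, you can query the current bluetooth state and confirm sleepwatcher is running:

## Check the bluetooth power state (1 = on, 0 = off)
blueutil -p
## Confirm sleepwatcher shows up as started
brew services list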

Tip: Using the Watch command to poll a URL

If you want to quickly test a URL for changes, the Linux watch command coupled with curl is a really simple way to hit a URL every n seconds (I use this for blue/green deployment testing to make sure there is no downtime when cutting over):

# Install watch command using homebrew
brew install watch
# Poll andrewbaker.ninja every second
watch -n 1 curl andrewbaker.ninja
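
If you only care about whether the site is up (rather than the full response body), a variation like the below keeps the output easy to scan (standard curl flags, just silencing the body and printing the status code):

# Poll every second and print only the HTTP status code
watch -n 1 'curl -s -o /dev/null -w "%{http_code}" andrewbaker.ninja'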

How to trigger Scaling Events using Stress-ng Command

If you are testing how your autoscaling policies respond to CPU load, a really simple way to do this is with the “stress-ng” command. Note: this is a very crude mechanism and wherever possible you should try to generate synthetic application load instead.

#!/bin/bash

# DESCRIPTION: After updating packages from the repo, installs stress-ng, a tool used to create various system loads for testing purposes.
sudo apt update
# Install stress-ng
sudo apt install -y stress-ng

# CPU spike: Run a CPU spike for 5 seconds
uptime
stress-ng --cpu 4 --timeout 5s --metrics-brief
uptime

# Disk Test: Start N (2) workers continually writing, reading and removing temporary files:
stress-ng --hdd 2 --timeout 5s --metrics-brief

# Memory stress test
# Populate memory. Use mmap N bytes per vm worker, the default is 256MB. 
# You can also specify the size as % of total available memory or in units of 
# Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g:
# Note: The --vm 2 will start N workers (2 workers) continuously calling 
# mmap/munmap and writing to the allocated memory. Note that this can cause 
# systems to trip the kernel OOM killer on Linux systems if not enough 
# physical memory and swap is not available
stress-ng --vm 2 --vm-bytes 1G --timeout 5s

# Combination Stress
# To run for 5 seconds with 4 cpu stressors, 2 io stressors and 1 vm 
# stressor using 1GB of virtual memory, enter:
stress-ng --cpu 4 --io 2 --vm 1 --vm-bytes 1G --timeout 5s --metrics-brief
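
While the stressors are running, you can watch the load locally and check that your scaling policy has reacted; for example (the auto scaling group name below is a placeholder):

# Watch the load averages climb locally
watch -n 5 uptime
# Check the autoscaling group activity (replace the group name with your own)
aws autoscaling describe-scaling-activities --auto-scaling-group-name my-asg-name --max-items 5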

How to Install Apps From Anywhere on Apple Mac

Previously Macs would allow you to install software from anywhere. Now you will see the error message “NMAPxx.mpkg cannot be opened because it’s from an unidentified developer”. If you want to fix this and enable apps to be installed from anywhere, you will need to run the following command:

sudo spctl --master-disable

Once you have run the script you should then see the “Anywhere” option in the System Preferences > Security & Privacy Tab!
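
If you later want to revert to the default behaviour, you can re-enable the check:

# Re-enable Gatekeeper's default policy (removes the "Anywhere" option again)
sudo spctl --master-enable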

Definition: Bonuscide

bonuscide

noun

Definition of bonuscide:

Bonuscide is a term used to describe incentive schemes that progressively poison an organisation by ensuring the flow of discretionary pay does not serve the organisation’s goals. These schemes can be observed in two main ways: the loss of key staff or the reduction in the client/customer base.

Bonuscide becomes more observable during a major crisis (for example COVID-19). Companies that practise this will self-harm by amplifying the fiscal impact of the crisis on a specific population of staff who are key to the company’s success. For example, legacy organisations will tend to target skills that the board or exco don’t understand, disproportionately targeting their technology teams whilst protecting their many layers of management.

The kinds of symptoms that will be visible are listed below:

  1. Rolling downside metrics: A metric will be used to reduce the discretionary pay pool, even though this metric was never previously used as an upside metric (see point 2 for what happens if it later becomes favourable).
  2. Pivot Upside Metrics: If the financial measure chosen in 1) improves in the future, a new/alternative unfavourable financial measure will be substituted.
  3. Status Quo: Discretionary pay will always favour the preservation of the status quo. Incentives will never flow to those involved in execution or change, because these companies are governed by Pournelle’s Iron Law of Bureaucracy.
  4. Panic Pay: Companies that practise bonuscide are periodically forced to carry out poorly thought through emergency incentives for their residual staff. This creates a negative selection process (whereby they lock in the tail performers after losing their top talent).
  5. Trust Vacuum: Leaders involved in managing this pay process will feel compromised, as they know that the trusted relationship with their team will be indefinitely tainted.
  6. Business Case: The savings generated by the reduced discretionary compensation will be a small fraction of the additional costs and revenue impact that the saving in compensation will create. This phenomenon is well covered in my previous post on Constraint Theory.

Put simply, if a business case were created for this exercise, it wouldn’t see the light of day. The end result of bonuscide is a corporate trust/talent vacuum that leads to significant long-term harm and brand damage.

Part 2: Increasing your Cloud consumption (the sane way)

Introduction

This article follows on from the “Cloud Migrations Crusade” blog post…

A single-tenancy datacenter is a fixed-scale, fixed-price service on a closed network. The costs of the resources in the datacenter are divided up and shared out to the enterprise constituents on a semi-random basis. If anyone uses fewer resources than forecast, this generates waste which is shared back to the enterprise. If there is more demand than forecast, it will generate service degradation, panic or an outage! This model is clearly fragile and doesn’t respond quickly to change; it is also wasteful, as it requires a level of overprovisioning based on forecast consumption (otherwise you will experience delays in projects, service degradation or reduced resilience).

Cloud, on the other hand, is a multi-tenanted, on-demand software service which you pay for as you use it. But surely having multiple tenants running on the same fixed capacity actually increases the risks? And just because it’s in the cloud doesn’t mean you can get away without over-provisioning – so who sits with the over-provisioned costs? The cloud providers have to build this into their rates. So cloud providers have to deal with a balance sheet of fixed capacity shared amongst customers running on-demand infrastructure. They do this with very clever forecasting, very short provisioning cycles, and by asking their customers for forecasts and then offering discounts for pre-commits.

Anything that moves you back towards managing resource levels / forecasting will destroy a huge portion of the value of moving to the cloud in the first place. For example, if you have ever been to a Re:Invent you will be floored by the rate of innovation and also by how easy it is to absorb these new innovative products. But wait – you just signed a 5yr cost commit and now you learn about Aurora’s new serverless database model. You realise that you could save millions of dollars; but you have to wait for your 5yr commits to expire before you adopt it, or maybe start mining bitcoin with all your excess commits! This is anti-innovation and anti-customer.

What’s even worse is that pre-commits are typically signed up front on day 1 – this is total madness!!! At the point where you know nothing about your brave new world, you use the old costs as a proxy to predict the new costs so that you can squeeze a lousy 5% saving at the risk of 100% of the commit size! What you will start to learn is that your cloud success is NOT based on the commercial contract that you sign with your cloud provider; it’s actually based on the quality of the engineering talent that your organisation is able to attract. Cloud is an IP war – it’s not a legal/sourcing war. Allow yourself to learn; don’t box yourself in on day 1. When you sign the pre-commit you will notice your first-year utilisation projections are actually tiny and therefore the savings are small. So what’s the point of signing so early on, when the risk is at a maximum and the gains are at a minimum? When you sign this deal you are essentially turning the cloud into a “financial data center” – you have destroyed the cloud before you even started!

A Lesson from the field – Solving Hadoop Compute Demand Spike:

We moved 7,000 cores of burst compute to AWS to solve a capacity issue on premise. That’s expensive, so let’s “fix the costs”! We could go and sign an RI (reserved instance), play with spot, buy savings plans or even beg/barter for some EDP relief. But instead we plugged the service usage into QuickSight and analysed the queries. We found one query was using 60 percent of the entire bank’s compute! Nobody confessed to owning the query, so we just disabled it (if you need a reason for your change management, describe the change as “disabling a financial DDOS”). We quickly found the service owner and explained that running a table scan across billions of rows to return a report with just last month’s data is not a good idea. We also explained that if they didn’t fix this we would start billing them in 6 weeks’ time (a few million dollars). The team deployed a fix and now we run the bank’s big data stack at half the cost – just by tuning one query!!!

So the point of the above is that there is no substitute for engineering excellence. You have to understand and engineer the cloud to win, you cannot contract yourself into the cloud. The more contracts you sign the more failures you will experience. This leads me to point 2…

Step 2: Training, Training, Training

Start the biggest training campaign you possibly can – make this your crusade. Train everyone: business, finance, security, infrastructure – you name it, you train it. Don’t limit what anyone can train on; training is cheap – feast as much as you can. Look at Udemy, ACloudGuru, YouTube, Whizlabs, etc. If you get this wrong you will find your organisation fills up with expensive consultants and bespoke migration products that you don’t need and can easily do yourself, via open source or with your cloud provider’s toolsets. In fact I would go one step further – if you’re not prepared to learn about the cloud, you’re not ready to go there.

Step 3: The OS Build

When you do start your cloud migration and begin to review your base OS images, go right back to the very beginning and remove every single product from all of these base builds. Look at what you can get out of the box from your cloud provider and really push yourself hard on what you really need vs what is merely nice to have. The trick is that to get the real benefit from a cloud migration, you have to start by making your builds as “naked” as possible. Nothing should move into the base build without a good reason. Ownership and reporting lines are not a good enough reason for someone’s special “tool” to make it into the build. This process, if done correctly, should deliver between 20-40% of your cloud migration savings. Do this badly and your costs, complexity and support will all head in the wrong direction.

Security HAS to be a first class citizen of your new world. In most organizations this will likely make for some awkward cultural collisions (control and ownership vs agility) and some difficult dialogs. The cloud, by definition, should be liberating – so how do you secure it without creating a “cloud bunker” that nobody can actually use? More on this later… 🙂

Step 4: Hybrid Networking

For any organisation with data centers – make no mistake, if you get this wrong it’s over before it starts.

Iterating through the contents of the ROT (running objects table) using C#

Back in the day I had to do quite a bit of COM / .NET interop. For some reason (and I cannot remember what it was), I needed to be able to enumerate the COM running object table (ROT). I think it was to do with CreateObject/GetObject and trying to synthesise a singleton for an exe.

The System.Runtime.InteropServices.Marshal.GetActiveObject method will return an existing instance of an application registered in the Running Object Table (ROT) by matching the prog id (or moniker) to an entry in this table.

Note: Office applications do not register themselves if another instance is already in the ROT, because the moniker for the application is always the same and cannot be distinguished. This means that you cannot attach to any instance except the first. However, because Office applications also register their documents in the ROT, you can successfully attach to other instances by iterating the ROT looking for a specific document, and attaching to that document to get the Application object from it.

The source code below contains a ROTHelper class which, among other things, demonstrates how to iterate through the ROT. Note: once you have the desired application object you will either need to use reflection or cast to the relevant COM interface to call any methods.

using System;
using System.Net.Sockets;
using System.Runtime.InteropServices;
using System.Collections;

#region TestROT Class

/// <summary>
/// Test ROT class showing how to use the ROTHelper.
/// </summary>
class TestROT
{
	/// <summary>
	/// The main entry point for the application.
	/// </summary>
	[STAThread]
	static void Main(string[] args)
	{
		// Iterate through all the objects in the ROT
		Hashtable runningObjects = ROTHelper.GetActiveObjectList(null);
		// Display the object ids
		foreach( DictionaryEntry de in runningObjects ) 
		{
			string progId = de.Key.ToString();
			if ( progId.IndexOf("{") != -1 ) 
			{
				// Convert a class id into a friendly prog Id
				progId = ROTHelper.ConvertClassIdToProgId( de.Key.ToString() ) ;
			}
			Console.WriteLine( progId );
			object getObj = ROTHelper.GetActiveObject(progId);
			if ( getObj != null ) 
			{
				Console.WriteLine( "Fetched: " + progId );
			}
			else
			{
				Console.WriteLine( "!!!!!FAILED TO fetch: " + progId );
			}
		}
		Console.ReadLine();
	}
}
#endregion TestROT Class

#region ROTHelper Class

/// <summary>
/// The COM running object table utility class.
/// </summary>
public class ROTHelper
{
	#region APIs

	[DllImport("ole32.dll")]  
	private static extern int GetRunningObjectTable(int reserved, 
		out UCOMIRunningObjectTable prot); 

	[DllImport("ole32.dll")]  
	private static extern int  CreateBindCtx(int reserved, 
		out UCOMIBindCtx ppbc);

	[DllImport("ole32.dll", PreserveSig=false)]
	private static extern void CLSIDFromProgIDEx([MarshalAs(UnmanagedType.LPWStr)] string progId, out Guid clsid);

	[DllImport("ole32.dll", PreserveSig=false)]
	private static extern void CLSIDFromProgID([MarshalAs(UnmanagedType.LPWStr)] string progId, out Guid clsid);

	[DllImport("ole32.dll")]
	private static extern int ProgIDFromCLSID([In()]ref Guid clsid, [MarshalAs(UnmanagedType.LPWStr)]out string lplpszProgID);

	#endregion

	#region Public Methods
	
	/// <summary>
	/// Converts a prog id into a COM class ID.
	/// </summary>
	/// <param name="progID">The prog id to convert to a class id.</param>
	/// <returns>Returns the matching class id or the prog id if it wasn't found.</returns>
	public static string ConvertProgIdToClassId( string progID )
	{
		Guid testGuid;
		try
		{
			CLSIDFromProgIDEx(progID, out testGuid);
		}
		catch
		{
			try
			{
				CLSIDFromProgID(progID, out testGuid);
			}
			catch
			{
				return progID;
			}
		}
		return testGuid.ToString().ToUpper();
	}

	/// <summary>
	/// Converts a COM class ID into a prog id.
	/// </summary>
	/// <param name="classID">The class id to convert to a prog id.</param>
	/// <returns>Returns the matching prog id or null if it wasn't found.</returns>
	public static string ConvertClassIdToProgId( string classID )
	{
		Guid testGuid = new Guid(classID.Replace("!",""));
		string progId = null;
		try
		{
			ProgIDFromCLSID(ref testGuid, out progId);
		}
		catch (Exception)
		{
			return null;
		}
		return progId;
	}

	/// <summary>
	/// Get a snapshot of the running object table (ROT).
	/// </summary>
	/// <param name="filter">The filter to apply to the list (nullable).</param>
	/// <returns>A hashtable mapping the display name of each matching ROT entry to the corresponding object.</returns>
	public static Hashtable GetActiveObjectList(string filter)
	{
		Hashtable result = new Hashtable();

		int numFetched;
		UCOMIRunningObjectTable runningObjectTable;   
		UCOMIEnumMoniker monikerEnumerator;
		UCOMIMoniker[] monikers = new UCOMIMoniker[1];

		GetRunningObjectTable(0, out runningObjectTable);    
		runningObjectTable.EnumRunning(out monikerEnumerator);
		monikerEnumerator.Reset();          

		while (monikerEnumerator.Next(1, monikers, out numFetched) == 0)
		{     
			UCOMIBindCtx ctx;
			CreateBindCtx(0, out ctx);     
        
			string runningObjectName;
			monikers[0].GetDisplayName(ctx, null, out runningObjectName);

			object runningObjectVal;  
			runningObjectTable.GetObject( monikers[0], out runningObjectVal); 
			if ( filter == null || filter.Length == 0 || runningObjectName.IndexOf( filter ) != -1 ) 
			{
				result[ runningObjectName ] = runningObjectVal;
			}
		} 

		return result;
	}

	/// <summary>
	/// Returns an object from the ROT, given a prog Id.
	/// </summary>
	/// <param name="progId">The prog id of the object to return.</param>
	/// <returns>The requested object, or null if the object is not found.</returns>
	public static object GetActiveObject(string progId)
	{
		// Convert the prog id into a class id
		string classId = ConvertProgIdToClassId( progId );

		UCOMIRunningObjectTable prot = null;
		UCOMIEnumMoniker pMonkEnum = null;
		try
		{
    		int Fetched = 0;
			// Open the running objects table.
			GetRunningObjectTable(0, out prot);
			prot.EnumRunning(out pMonkEnum);
			pMonkEnum.Reset();
			UCOMIMoniker[] pmon = new UCOMIMoniker[1];

			// Iterate through the results
			while (pMonkEnum.Next(1, pmon, out Fetched) == 0)
			{
				UCOMIBindCtx pCtx;

				CreateBindCtx(0, out pCtx);

				string displayName;
				pmon[0].GetDisplayName(pCtx, null, out displayName);
				Marshal.ReleaseComObject(pCtx);
				if ( displayName.IndexOf( classId ) != -1 )
				{
					// Return the matching object
					object objReturnObject;
					prot.GetObject(pmon[0], out objReturnObject);
					return objReturnObject;
				}
			}
			return null;
		}
		finally
		{
			// Free resources
			if (prot != null )
				Marshal.ReleaseComObject( prot);
			if (pMonkEnum != null )
				Marshal.ReleaseComObject ( pMonkEnum );
		}
	}

	#endregion
}

#endregion