        //Verifies that randomized server/database distributions recombine into the requested fixed bucket count
        public void MatchServersToFixedBucket()
        {
            try
            {
                int    targetBuckets = 3;
                int    serverCount   = 40;
                int    minDbCount    = 10;
                int    maxDbCount    = 500;
                Random rnd           = new Random();

                string      tmpFile = string.Empty;
                MultiDbData multiData;
                try
                {
                    for (int i = 0; i < 100; i++)
                    {
                        targetBuckets = rnd.Next(2, 90);
                        serverCount   = rnd.Next(targetBuckets + 1, 300);
                        minDbCount    = rnd.Next(10, 150);
                        maxDbCount    = rnd.Next(minDbCount + 1, 500);
                        int[] matrix;
                        (tmpFile, multiData) = CreateRandomizedMultiDbData(serverCount, minDbCount, maxDbCount, out matrix);

                        //The real work
                        var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);

                        //Numbers for comparisons
                        var flattened    = Concurrency.ConcurrencyByServer(multiData);
                        var idealBucket  = Math.Ceiling((double)flattened.Sum(c => c.Count()) / (double)targetBuckets);
                        int maxBucket    = flattened.Where(c => c.Count() <= idealBucket).Max(c => c.Count()); //exclude the buckets that were already above the ideal
                        int medianBucket = flattened.OrderBy(c => c.Count()).ToList()[flattened.Count() / 2].Count(); //middle element of the sorted list

                        string message = $"Buckets: {targetBuckets}; Servers: {serverCount}; Matrix: {string.Join(",", matrix)}";
                        Assert.AreEqual(targetBuckets, buckets.Count(), message);
                        Assert.IsTrue(buckets.Max(c => c.Count()) < maxBucket + (idealBucket * 1.2), message);


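                        //Smoke test: converting the buckets back to config lines should not throw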
                        var str = Concurrency.ConvertBucketsToConfigLines(buckets);

                        if (File.Exists(tmpFile))
                        {
                            File.Delete(tmpFile);
                        }
                    }
                }
                finally
                {
                    if (File.Exists(tmpFile))
                    {
                        File.Delete(tmpFile);
                    }
                }
            }
            catch (OutOfMemoryException)
            {
                //GitHub actions sometimes will run out of memory running this test!
            }
        }
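
        //Round-trips randomized buckets through ConvertBucketsToConfigLines and verifies counts and server boundaries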
        public void MatchServersToFixedBucket_ToConfigLines()
        {
            int    targetBuckets = 3;
            int    serverCount   = 40;
            int    minDbCount    = 10;
            int    maxDbCount    = 500;
            Random rnd           = new Random();

            string      tmpFile = string.Empty;
            MultiDbData multiData;

            try
            {
                for (int i = 0; i < 20; i++)
                {
                    targetBuckets = rnd.Next(2, 51);
                    serverCount   = rnd.Next(targetBuckets + 1, 400);
                    minDbCount    = rnd.Next(10, 201);
                    maxDbCount    = rnd.Next(minDbCount + 1, 600);
                    int[] matrix;
                    (tmpFile, multiData) = CreateRandomizedMultiDbData(serverCount, minDbCount, maxDbCount, out matrix);

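                    //The real work: recombine to the target bucket count, then serialize back to config lines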
                    var    buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);
                    var    str     = Concurrency.ConvertBucketsToConfigLines(buckets);
                    string message = $"Buckets: {targetBuckets}; Servers: {serverCount}; Matrix: {string.Join(",", matrix)}";
                    Assert.AreEqual(buckets.Count(), str.Count(), message);
                    for (int j = 0; j < buckets.Count(); j++)
                    {
                        Assert.AreEqual(buckets[j].Count(), str[j].Count(), message);
                        Assert.AreEqual(buckets[j].First().Item1, str[j].First().Substring(0, str[j].First().IndexOf(":")), message);
                        Assert.AreEqual(buckets[j].Last().Item1, str[j].Last().Substring(0, str[j].Last().IndexOf(":")), message);
                    }

                    if (File.Exists(tmpFile))
                    {
                        File.Delete(tmpFile);
                    }
                }
            }
            finally
            {
                if (File.Exists(tmpFile))
                {
                    File.Delete(tmpFile);
                }
            }
        }
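
        //Verifies that a defined (non-random) server/database distribution recombines into the requested bucket count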
        public void MatchDefinedServersToFixedBucket(int targetBuckets, int serverCount, int[] dbsPerServer)
        {
            string      tmpFile = string.Empty;
            MultiDbData multiData;

            try
            {
                (tmpFile, multiData) = CreateDefinedMultiDbData(serverCount, dbsPerServer);

                //The real work
                var buckets = Concurrency.RecombineServersToFixedBucketCount(multiData, targetBuckets);

                //Numbers for comparisons
                var flattened    = Concurrency.ConcurrencyByServer(multiData);
                var idealBucket  = Math.Ceiling((double)flattened.Sum(c => c.Count()) / (double)targetBuckets);
                int maxBucket    = flattened.Where(c => c.Count() <= idealBucket).Max(c => c.Count()); //exclude the buckets that were already above the ideal
                int medianBucket = flattened.OrderBy(c => c.Count()).ToList()[flattened.Count() / 2].Count(); //middle element of the sorted list

                string message = $"Buckets: {targetBuckets}; Servers: {serverCount}; Matrix: {string.Join(",", dbsPerServer)}";
                Assert.AreEqual(targetBuckets, buckets.Count(), message);
                Assert.IsTrue(buckets.Max(c => c.Count()) < maxBucket + idealBucket + (idealBucket * 1.2), message);


                var str = Concurrency.ConvertBucketsToConfigLines(buckets);

                if (File.Exists(tmpFile))
                {
                    File.Delete(tmpFile);
                }
            }
            finally
            {
                if (File.Exists(tmpFile))
                {
                    File.Delete(tmpFile);
                }
            }
        }
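
        //Creates and runs an Azure Batch job: validates arguments and overrides, uploads input files,
        //adds one task per generated command line, waits for task completion, consolidates logs, and
        //returns an exit code along with a read-only SAS URL for the output container.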
        public async Task<(int retval, string readOnlySas)> StartBatch(bool stream = false, bool unittest = false)
        {
            string applicationPackage = string.Empty;

            if (string.IsNullOrWhiteSpace(cmdLine.BatchArgs.ApplicationPackage))
            {
                switch (cmdLine.BatchArgs.BatchPoolOs)
                {
                case OsType.Linux:
                    applicationPackage = "SqlBuildManagerLinux";
                    break;

                case OsType.Windows:
                default:
                    applicationPackage = "SqlBuildManagerWindows";
                    break;
                }
            }
            else
            {
                applicationPackage = cmdLine.BatchArgs.ApplicationPackage;
            }

            int cmdValid = ValidateBatchArgs(cmdLine, batchType);

            if (cmdValid != 0)
            {
                return (cmdValid, string.Empty);
            }


            //If extracting scripts from a platinum copy, create the DACPAC here
            if (!string.IsNullOrWhiteSpace(cmdLine.DacPacArgs.PlatinumDbSource) && !string.IsNullOrWhiteSpace(cmdLine.DacPacArgs.PlatinumServerSource)) //using a platinum database as the source
            {
                log.LogInformation($"Extracting Platinum Dacpac from {cmdLine.DacPacArgs.PlatinumServerSource} : {cmdLine.DacPacArgs.PlatinumDbSource}");
                string dacpacName = Path.Combine(cmdLine.RootLoggingPath, cmdLine.DacPacArgs.PlatinumDbSource + ".dacpac");

                if (!DacPacHelper.ExtractDacPac(cmdLine.DacPacArgs.PlatinumDbSource, cmdLine.DacPacArgs.PlatinumServerSource, cmdLine.AuthenticationArgs.AuthenticationType, cmdLine.AuthenticationArgs.UserName, cmdLine.AuthenticationArgs.Password, dacpacName))
                {
                    log.LogError($"Error creating the Platinum dacpac from {cmdLine.DacPacArgs.PlatinumServerSource} : {cmdLine.DacPacArgs.PlatinumDbSource}");
                }
                cmdLine.DacPacArgs.PlatinumDacpac = dacpacName;
            }

            //Check for the platinum dacpac and configure it if necessary
            log.LogInformation("Validating database overrides");
            MultiDbData multiData;
            int?        myExitCode = 0;

            //TODO: fix this for queue!!!!
            //Validate the override settings (not needed if --servicebusconnection is provided)
            string[] errorMessages;
            int      tmpVal = Validation.ValidateAndLoadMultiDbData(cmdLine.MultiDbRunConfigFileName, cmdLine, out multiData, out errorMessages);

            if (tmpVal != 0)
            {
                log.LogError($"Unable to validate database config\r\n{string.Join("\r\n", errorMessages)}");
                return (tmpVal, string.Empty);
            }


            //Validate the platinum dacpac
            var tmpValReturn = Validation.ValidateAndLoadPlatinumDacpac(cmdLine, multiData);

            if (tmpValReturn.Item1 == (int)ExecutionReturn.DacpacDatabasesInSync)
            {
                return ((int)ExecutionReturn.DacpacDatabasesInSync, string.Empty);
            }
            else if (tmpValReturn.Item1 != 0)
            {
                return (tmpValReturn.Item1, string.Empty);
            }

            BatchClient batchClient = null;

            //Get the batch and storage values
            var (jobId, poolId, storageContainerName) = SetBatchJobAndStorageNames(cmdLine);
            log.LogInformation($"Using Azure Batch account: {cmdLine.ConnectionArgs.BatchAccountName} ({cmdLine.ConnectionArgs.BatchAccountUrl})");
            log.LogInformation($"Setting job id to: {jobId}");

            string readOnlySasToken = string.Empty;

            try
            {
                log.LogInformation($"Batch job start: {DateTime.Now}");
                Stopwatch timer = new Stopwatch();
                timer.Start();

                //Get storage ready
                BlobServiceClient          storageSvcClient = StorageManager.CreateStorageClient(cmdLine.ConnectionArgs.StorageAccountName, cmdLine.ConnectionArgs.StorageAccountKey);
                StorageSharedKeyCredential storageCreds     = new StorageSharedKeyCredential(cmdLine.ConnectionArgs.StorageAccountName, cmdLine.ConnectionArgs.StorageAccountKey);
                string containerSasToken = StorageManager.GetOutputContainerSasUrl(cmdLine.ConnectionArgs.StorageAccountName, storageContainerName, storageCreds, false);
                log.LogDebug($"Output write SAS token: {containerSasToken}");


                // Get a Batch client using account creds, and create the pool
                BatchSharedKeyCredentials cred = new BatchSharedKeyCredentials(cmdLine.ConnectionArgs.BatchAccountUrl, cmdLine.ConnectionArgs.BatchAccountName, cmdLine.ConnectionArgs.BatchAccountKey);
                batchClient = BatchClient.Open(cred);

                // Create a Batch pool, VM configuration, Windows Server image
                //bool success = CreateBatchPoolLegacy(batchClient, poolId, cmdLine.BatchArgs.BatchNodeCount,cmdLine.BatchArgs.BatchVmSize,cmdLine.BatchArgs.BatchPoolOs);
                bool success = await CreateBatchPool(cmdLine, poolId);


                // The collection of data files that are to be processed by the tasks
                List<string> inputFilePaths = new List<string>();
                if (!string.IsNullOrEmpty(cmdLine.DacPacArgs.PlatinumDacpac))
                {
                    inputFilePaths.Add(cmdLine.DacPacArgs.PlatinumDacpac);
                }
                if (!string.IsNullOrEmpty(cmdLine.BuildFileName))
                {
                    inputFilePaths.Add(cmdLine.BuildFileName);
                }
                if (!string.IsNullOrEmpty(cmdLine.MultiDbRunConfigFileName))
                {
                    inputFilePaths.Add(cmdLine.MultiDbRunConfigFileName);
                }
                if (!string.IsNullOrEmpty(this.queryFile))
                {
                    inputFilePaths.Add(this.queryFile);
                }


                //Get the list of DB targets and distribute across batch count
                var splitTargets = new List<string[]>();
                if (string.IsNullOrEmpty(cmdLine.ConnectionArgs.ServiceBusTopicConnectionString))
                {
                    int valRet = Validation.ValidateAndLoadMultiDbData(cmdLine.MultiDbRunConfigFileName, null, out MultiDbData multiDb, out errorMessages);
                    List<IEnumerable<(string, List<DatabaseOverride>)>> concurrencyBuckets = null;
                    if (valRet == 0)
                    {
                        if (cmdLine.ConcurrencyType == ConcurrencyType.Count)
                        {
                            //If it's just by count.. split evenly by the number of nodes
                            concurrencyBuckets = Concurrency.ConcurrencyByType(multiDb, cmdLine.BatchArgs.BatchNodeCount, cmdLine.ConcurrencyType);
                        }
                        else
                        {
                            //Splitting by server is trickier: bucket by the requested concurrency value, then consolidate below if needed
                            concurrencyBuckets = Concurrency.ConcurrencyByType(multiDb, cmdLine.Concurrency, cmdLine.ConcurrencyType);
                        }

                        //If we end up with fewer splits, then reduce the node count...
                        if (concurrencyBuckets.Count() < cmdLine.BatchArgs.BatchNodeCount)
                        {
                            log.LogWarning($"NOTE! The number of targets ({concurrencyBuckets.Count()}) is less than the requested node count ({cmdLine.BatchArgs.BatchNodeCount}). Changing the pool node count to {concurrencyBuckets.Count()}");
                            cmdLine.BatchArgs.BatchNodeCount = concurrencyBuckets.Count();
                        }
                        else if (concurrencyBuckets.Count() > cmdLine.BatchArgs.BatchNodeCount) //need to do some consolidating
                        {
                            log.LogWarning($"NOTE! When splitting by {cmdLine.ConcurrencyType.ToString()}, the number of targets ({concurrencyBuckets.Count()}) is greater than the requested node count ({cmdLine.BatchArgs.BatchNodeCount}). Will consolidate to fit within the number of nodes");
                            concurrencyBuckets = Concurrency.RecombineServersToFixedBucketCount(multiDb, cmdLine.BatchArgs.BatchNodeCount);
                        }
                    }
                    else
                    {
                        throw new ArgumentException($"Error parsing database targets. {String.Join(Environment.NewLine, errorMessages)}");
                    }
                    splitTargets = Concurrency.ConvertBucketsToConfigLines(concurrencyBuckets);

                    //Write out each split file
                    string rootPath = Path.GetDirectoryName(cmdLine.MultiDbRunConfigFileName);
                    for (int i = 0; i < splitTargets.Count; i++)
                    {
                        var tmpName = Path.Combine(rootPath, string.Format(baseTargetFormat, i));
                        File.WriteAllLines(tmpName, splitTargets[i]);
                        inputFilePaths.Add(tmpName);
                    }
                }

                // Upload the data files to Azure Storage. This is the data that will be processed by each of the tasks that are
                // executed on the compute nodes within the pool.
                List<ResourceFile> inputFiles = new List<ResourceFile>();

                foreach (string filePath in inputFilePaths)
                {
                    inputFiles.Add(StorageManager.UploadFileToBatchContainer(cmdLine.ConnectionArgs.StorageAccountName, storageContainerName, storageCreds, filePath));
                }

                //Create the individual command lines for each node
                IList<string> commandLines = CompileCommandLines(cmdLine, inputFiles, containerSasToken, cmdLine.BatchArgs.BatchNodeCount, jobId, cmdLine.BatchArgs.BatchPoolOs, applicationPackage, this.batchType);
                foreach (var s in commandLines)
                {
                    log.LogDebug(s);
                }

                try
                {
                    // Create a Batch job
                    log.LogInformation($"Creating job [{jobId}]...");
                    CloudJob job = batchClient.JobOperations.CreateJob();
                    job.Id = jobId;
                    job.PoolInformation = new PoolInformation {
                        PoolId = poolId
                    };
                    job.Commit();
                }
                catch (BatchException be)
                {
                    // Accept the specific error code JobExists as that is expected if the job already exists
                    if (be.RequestInformation?.BatchError?.Code == BatchErrorCodeStrings.JobExists)
                    {
                        log.LogInformation($"The job {jobId} already existed when we tried to create it");
                    }
                    else
                    {
                        throw; // Any other exception is unexpected
                    }
                }

                // Create a collection to hold the tasks that we'll be adding to the job
                if (splitTargets.Count != 0)
                {
                    log.LogInformation($"Adding {splitTargets.Count} tasks to job [{jobId}]...");
                }
                else if (!string.IsNullOrWhiteSpace(cmdLine.ConnectionArgs.ServiceBusTopicConnectionString))
                {
                    log.LogInformation($"Adding tasks to job [{jobId}]...");
                }

                List<CloudTask> tasks = new List<CloudTask>();

                // Create each of the tasks to process on each node
                for (int i = 0; i < commandLines.Count; i++)
                {
                    string taskId          = $"Task{i}";
                    string taskCommandLine = commandLines[i];

                    CloudTask task = new CloudTask(taskId, taskCommandLine);
                    task.ResourceFiles = inputFiles;
                    task.ApplicationPackageReferences = new List<ApplicationPackageReference>
                    {
                        new ApplicationPackageReference {
                            ApplicationId = applicationPackage
                        }
                    };
                    task.OutputFiles = new List<OutputFile>
                    {
                        new OutputFile(
                            filePattern: @"../std*.txt",
                            destination: new OutputFileDestination(new OutputFileBlobContainerDestination(containerUrl: containerSasToken, path: taskId)),
                            uploadOptions: new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion))    //,

                        //new OutputFile(
                        //    filePattern: @"../wd/*",
                        //    destination: new OutputFileDestination(new OutputFileBlobContainerDestination(containerUrl: containerSasToken, path: $"{taskId}/wd")),
                        //    uploadOptions: new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion)),

                        //new OutputFile(
                        //    filePattern: @"../wd/working/*",
                        //    destination: new OutputFileDestination(new OutputFileBlobContainerDestination(containerUrl: containerSasToken, path: $"{taskId}/working")),
                        //    uploadOptions: new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion))//,

                        //new OutputFile(
                        //   filePattern: @"../*.cfg",
                        //   destination: new OutputFileDestination(new OutputFileBlobContainerDestination(containerUrl: containerSasToken)),
                        //   uploadOptions: new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion))
                    };

                    tasks.Add(task);
                }

                // Add all tasks to the job.
                batchClient.JobOperations.AddTask(jobId, tasks);

                // Monitor task success/failure, specifying a maximum amount of time to wait for the tasks to complete.
                TimeSpan timeout = TimeSpan.FromMinutes(30);
                log.LogInformation($"Monitoring all tasks for 'Completed' state, timeout in {timeout}...");

                this.BatchProcessStartedEvent?.Invoke(this, new BatchMonitorEventArgs(this.cmdLine, stream, unittest));

                IEnumerable<CloudTask> addedTasks = batchClient.JobOperations.ListTasks(jobId);

                batchClient.Utilities.CreateTaskStateMonitor().WaitAll(addedTasks, TaskState.Completed, timeout);

                this.BatchExecutionCompletedEvent?.Invoke(this, new BatchMonitorEventArgs(this.cmdLine, stream, unittest));

                log.LogInformation("All tasks reached state Completed.");

                // Print task output
                log.LogInformation("Printing task output...");

                IEnumerable<CloudTask> completedTasks = batchClient.JobOperations.ListTasks(jobId);

                foreach (CloudTask task in completedTasks)
                {
                    string nodeId = task.ComputeNodeInformation.ComputeNodeId;
                    log.LogInformation("---------------------------------");
                    log.LogInformation($"Task: {task.Id}");
                    log.LogInformation($"Node: {nodeId}");
                    log.LogInformation($"Exit Code: {task.ExecutionInformation.ExitCode}");
                    if (isDebug)
                    {
                        log.LogDebug("Standard out:");
                        log.LogDebug(task.GetNodeFile(Constants.StandardOutFileName).ReadAsString());
                    }
                    if (task.ExecutionInformation.ExitCode != 0)
                    {
                        myExitCode = task.ExecutionInformation.ExitCode;
                    }
                }
                log.LogInformation("---------------------------------");

                // Print out some timing info
                timer.Stop();
                log.LogInformation($"Batch job end: {DateTime.Now}");
                log.LogInformation($"Elapsed time: {timer.Elapsed}");


                // Clean up Batch resources
                if (cmdLine.BatchArgs.DeleteBatchJob)
                {
                    batchClient.JobOperations.DeleteJob(jobId);
                }
                if (cmdLine.BatchArgs.DeleteBatchPool)
                {
                    batchClient.PoolOperations.DeletePool(poolId);
                }

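                //Flush and close the local loggers before consolidating the per-task log files in storage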
                SqlBuildManager.Logging.Threaded.Configure.CloseAndFlushAllLoggers();
                log.LogInformation("Consolidating log files");
                StorageManager.ConsolidateLogFiles(storageSvcClient, storageContainerName, inputFilePaths);

                if (batchType == BatchType.Query)
                {
                    StorageManager.CombineQueryOutputfiles(storageSvcClient, storageContainerName, this.outputFile);
                }

                //Finish the job out
                if (myExitCode == 0)
                {
                    log.LogInformation($"Setting job {jobId} status to Finished");
                    CloudJob j = batchClient.JobOperations.GetJob(jobId);
                    j.Terminate("Finished");
                }
                else
                {
                    log.LogInformation($"Setting job {jobId} status to exit code: {myExitCode}");
                    CloudJob j = batchClient.JobOperations.GetJob(jobId);
                    j.Terminate("Error");
                }


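                //Generate a read-only SAS URL so the consolidated log files can be reviewed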
                readOnlySasToken = StorageManager.GetOutputContainerSasUrl(cmdLine.ConnectionArgs.StorageAccountName, storageContainerName, storageCreds, true);
                log.LogInformation($"Log files can be found here: {readOnlySasToken}");
                log.LogInformation("The read-only SAS token URL is valid for 7 days.");
                log.LogInformation("You can download \"Azure Storage Explorer\" from here: https://azure.microsoft.com/en-us/features/storage-explorer/");
                log.LogInformation("You can also get details on your Azure Batch execution from the \"Azure Batch Explorer\" found here: https://azure.github.io/BatchExplorer/");
            }
            catch (Exception ex)
            {
                log.LogError($"Exception when running batch job\r\n{ex}");
                log.LogInformation($"Setting job {jobId} status to Failed");
                try
                {
                    CloudJob j = batchClient.JobOperations.GetJob(jobId);
                    j.Terminate("Failed");
                }
                catch { } //best effort; the job may not exist or may already be terminated
                myExitCode = 486;
            }
            finally
            {
                log.LogInformation("Batch complete");
                if (batchClient != null)
                {
                    batchClient.Dispose();
                }
            }

            if (myExitCode.HasValue)
            {
                log.LogInformation($"Exit Code: {myExitCode.Value}");
                return (myExitCode.Value, readOnlySasToken);
            }
            else
            {
                log.LogInformation($"Exit Code: {-100009}");
                return (-100009, readOnlySasToken);
            }
        }