private async Async.Task<HttpResponseData> Post(HttpRequestData req) {
    // Parse the incoming node-event envelope; malformed requests are rejected up front.
    var request = await RequestHandling.ParseRequest<NodeStateEnvelope>(req);
    if (!request.IsOk) {
        return await _context.RequestHandling.NotOk(req, request.ErrorV, context: "node event");
    }

    var envelope = request.OkV;
    _log.Info($"node event: machine_id: {envelope.MachineId} event: {EntityConverter.ToJsonString(envelope)}");

    // Dispatch on the concrete event type; any unrecognized type yields an error.
    var error = envelope.Event switch {
        NodeStateUpdate updateEvent => await OnStateUpdate(envelope.MachineId, updateEvent),
        WorkerEvent workerEvent => await OnWorkerEvent(envelope.MachineId, workerEvent),
        NodeEvent nodeEvent => await OnNodeEvent(envelope.MachineId, nodeEvent),
        _ => new Error(ErrorCode.INVALID_REQUEST, new string[] { $"invalid node event: {envelope.Event.GetType().Name}" }),
    };

    if (error is Error failure) {
        return await _context.RequestHandling.NotOk(req, failure, context: "node event");
    }

    return await RequestHandling.Ok(req, new BoolResult(true));
}
public async Task<HttpResponseData> GetSubscription([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/creds/subscription")] HttpRequestData req) {
    // Test hook: returns the subscription id of the credentials in use, as plain text.
    _log.Info("Get subscription");
    var subscription = _creds.GetSubscription().ToString();
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(subscription);
    return response;
}
public async Task<HttpResponseData> GetMonitorSettings([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/logAnalytics/monitorSettings")] HttpRequestData req) {
    // Test hook: serializes the current Log Analytics monitor settings as JSON.
    _log.Info("Get monitor settings");
    var settings = await _logAnalytics.GetMonitorSettings();
    var body = JsonSerializer.Serialize(settings, EntityConverter.GetJsonSerializerOptions());
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(body);
    return response;
}
public async Task<HttpResponseData> ListInstanceIds([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/vmssOperations/listInstanceIds")] HttpRequestData req) {
    // Test hook: lists the VMSS instance ids for the scaleset identified by the
    // required `name` query parameter (a GUID).
    _log.Info("list instance ids");
    var query = UriExtension.GetQueryComponents(req.Url);
    // Throw a specific exception type instead of the base Exception; callers
    // catching Exception still catch this.
    var name = UriExtension.GetGuid("name", query) ?? throw new ArgumentException("name must be set");
    var ids = await _vmssOps.ListInstanceIds(name);
    var json = JsonSerializer.Serialize(ids, EntityConverter.GetJsonSerializerOptions());
    var resp = req.CreateResponse(HttpStatusCode.OK);
    await resp.WriteStringAsync(json);
    return resp;
}
public async Task<HttpResponseData> LogEvent([HttpTrigger(AuthorizationLevel.Anonymous, "put", Route = "testhooks/events/logEvent")] HttpRequestData req) {
    // Test hook: deserializes an EventMessage from the request body and logs its event.
    _log.Info("Log event");
    var body = await req.ReadAsStringAsync();
    var message = JsonSerializer.Deserialize<EventMessage>(body!, EntityConverter.GetJsonSerializerOptions());
    _events.LogEvent(message!.Event);
    return req.CreateResponse(HttpStatusCode.OK);
}
public async Task<HttpResponseData> Info([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/info")] HttpRequestData req) {
    // Test hook: returns instance name, resource group, and deployment slot as JSON.
    _log.Info("Creating function info response");
    var info = new FunctionInfo(
        $"{_config.OneFuzzInstanceName}",
        $"{_config.OneFuzzResourceGroup}",
        Environment.GetEnvironmentVariable("WEBSITE_SLOT_NAME"));
    var response = req.CreateResponse();
    _log.Info("Returning function info");
    await response.WriteAsJsonAsync(info);
    _log.Info("Returned function info");
    return response;
}
public async Task<HttpResponseData> NewFiles([HttpTrigger(AuthorizationLevel.Anonymous, "put", Route = "testhooks/notificationOperations/newFiles")] HttpRequestData req) {
    // Test hook: triggers new-file notification handling for a container/file pair.
    _log.Info("new files");
    var query = UriExtension.GetQueryComponents(req.Url);
    var container = query["container"];
    var fileName = query["fileName"];
    // Defaults to true when the query parameter is absent.
    var failTaskOnTransientError = UriExtension.GetBoolValue("failTaskOnTransientError", query, true);
    await _notificationOps.NewFiles(new Container(container), fileName, failTaskOnTransientError);
    return req.CreateResponse(HttpStatusCode.OK);
}
public async Task<HttpResponseData> GetJob([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/jobOps/job")] HttpRequestData req) {
    // Test hook: fetches a job record by the required `jobId` query parameter (a GUID).
    _log.Info("Get job info");
    var query = UriExtension.GetQueryComponents(req.Url);
    var job = await _jobOps.Get(Guid.Parse(query["jobId"]));
    var body = JsonSerializer.Serialize(job, EntityConverter.GetJsonSerializerOptions());
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(body);
    return response;
}
public async Async.Task Run([QueueTrigger("proxy", Connection = "AzureWebJobsStorage")] string msg) {
    // Records a proxy heartbeat: stamps the entry with the current UTC time and
    // persists it on the matching proxy record.
    _log.Info($"heartbeat: {msg}");
    var heartbeat = JsonSerializer.Deserialize<ProxyHeartbeat>(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}");
    var stamped = heartbeat with { TimeStamp = DateTimeOffset.UtcNow };

    var proxy = await _proxy.GetByProxyId(stamped.ProxyId);
    var log = _log.WithTag("ProxyId", stamped.ProxyId.ToString());
    if (proxy == null) {
        log.Warning($"invalid proxy id: {stamped.ProxyId}");
        return;
    }

    var replaceResult = await _proxy.Replace(proxy with { Heartbeat = stamped });
    if (!replaceResult.IsOk) {
        var (status, reason) = replaceResult.ErrorV;
        log.Error($"Failed to replace proxy heartbeat record due to [{status}] {reason}");
    }
}
}
public async Task<HttpResponseData> GetPool([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/poolOperations/pool")] HttpRequestData req) {
    // Test hook: looks up a pool by the `name` query parameter.
    // OK -> 200 with the pool record; otherwise 400 with the error payload.
    _log.Info("get pool");
    var query = UriExtension.GetQueryComponents(req.Url);
    var poolRes = await _poolOps.GetByName(PoolName.Parse(query["name"]));

    if (!poolRes.IsOk) {
        var errResponse = req.CreateResponse(HttpStatusCode.BadRequest);
        var errBody = JsonSerializer.Serialize(poolRes.ErrorV, EntityConverter.GetJsonSerializerOptions());
        await errResponse.WriteStringAsync(errBody);
        return errResponse;
    }

    var response = req.CreateResponse(HttpStatusCode.OK);
    var body = JsonSerializer.Serialize(poolRes.OkV, EntityConverter.GetJsonSerializerOptions());
    await response.WriteStringAsync(body);
    return response;
}
public async Task<HttpResponseData> GetPublicNic([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/ipOps/publicNic")] HttpRequestData req) {
    // Test hook: resolves a public NIC by resource group + name and returns its name.
    _log.Info("Get public nic");
    var query = UriExtension.GetQueryComponents(req.Url);
    var nic = await _ipOps.GetPublicNic(query["rg"], query["name"]);
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(nic.Get().Value.Data.Name);
    return response;
}
//[Function("QueueNodeHearbeat")]
public async Async.Task Run([QueueTrigger("node-heartbeat", Connection = "AzureWebJobsStorage")] string msg) {
    // Records a node heartbeat: updates the node row's Heartbeat timestamp and
    // emits a heartbeat event.
    _log.Info($"heartbeat: {msg}");
    var nodes = _context.NodeOperations;
    var events = _context.Events;

    var entry = JsonSerializer.Deserialize<NodeHeartbeatEntry>(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}");
    var log = _log.WithTag("NodeId", entry.NodeId.ToString());

    var node = await nodes.GetByMachineId(entry.NodeId);
    if (node == null) {
        log.Warning($"invalid node id: {entry.NodeId}");
        return;
    }

    var replaceResult = await nodes.Replace(node with { Heartbeat = DateTimeOffset.UtcNow });
    if (!replaceResult.IsOk) {
        var (status, reason) = replaceResult.ErrorV;
        log.Error($"Failed to replace heartbeat info due to [{status}] {reason}");
    }

    // TODO: do we still send event if we fail do update the table ?
    await events.SendEvent(new EventNodeHeartbeat(node.MachineId, node.ScalesetId, node.PoolName));
}
}
//[Function("QueueWebhooks")]
public async Async.Task Run([QueueTrigger("myqueue-items", Connection = "AzureWebJobsStorage")] string msg) {
    // Dequeues a webhook message and hands it to the webhook message log for processing.
    _log.Info($"Webhook Message Queued: {msg}");
    var queued = JsonSerializer.Deserialize<WebhookMessageQueueObj>(msg, EntityConverter.GetJsonSerializerOptions())
        .EnsureNotNull($"wrong data {msg}");
    await _webhookMessageLog.ProcessFromQueue(queued);
}
public async Task<HttpResponseData> GetSubscription([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/disks")] HttpRequestData req) {
    // Test hook: returns the names of all disks in the base resource group.
    // NOTE(review): the method name says "subscription" but the route and body
    // deal with disks — consider renaming when the function binding permits.
    _log.Info("Get disk names");
    var resp = req.CreateResponse(HttpStatusCode.OK);
    // Select directly over the listing; the intermediate ToList() was a
    // needless extra materialization.
    var diskNames = _diskOps.ListDisks(_creds.GetBaseResourceGroup()).Select(x => x.Data.Name);
    await resp.WriteAsJsonAsync(diskNames);
    return resp;
}
public async Task<bool> DeleteDisk(string resourceGroup, string name) {
    // Best-effort disk deletion: returns true when a delete was started,
    // false when the disk was missing or the attempt failed.
    try {
        _logTracer.Info($"deleting disks {resourceGroup} : {name}");
        var disk = await _creds.GetResourceGroupResource().GetDiskAsync(name);
        if (disk != null) {
            // WaitUntil.Started: fire the delete without waiting for completion.
            await disk.Value.DeleteAsync(WaitUntil.Started);
            return true;
        }
    } catch (Exception e) {
        // Deliberate best-effort: log and fall through to the failure result.
        _logTracer.Error($"unable to delete disk: {name} {e.Message}");
        _logTracer.Exception(e);
    }
    return false;
}
public async Task<HttpResponseData> GetInstanceId([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/containers/instanceId")] HttpRequestData req) {
    // Test hook: returns the instance id as a plain string.
    _log.Info("Get instance ID");
    var instanceId = await _containers.GetInstanceId();
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(instanceId.ToString());
    return response;
}
public async Async.Task Run([TimerTrigger("00:01:30")] TimerInfo t) {
    // NOTE: Update pools first, such that scalesets impacted by pool updates
    // (such as shutdown or resize) happen during this iteration `timer_worker`
    // rather than the following iteration.
    await foreach (var pool in _poolOps.SearchAll()) {
        if (PoolStateHelper.NeedsWork.Contains(pool.State)) {
            _log.Info($"update pool: {pool.PoolId} ({pool.Name})");
            await _poolOps.ProcessStateUpdate(pool);
        }
    }

    // NOTE: Nodes, and Scalesets should be processed in a consistent order such
    // during 'pool scale down' operations. This means that pools that are
    // scaling down will more likely remove from the same scalesets over time.
    // By more likely removing from the same scalesets, we are more likely to
    // get to empty scalesets, which can safely be deleted.
    await _nodeOps.MarkOutdatedNodes();
    await _nodeOps.CleanupBusyNodesWithoutWork();

    await foreach (var node in _nodeOps.SearchStates(states: NodeStateHelper.NeedsWorkStates)) {
        _log.Info($"update node: {node.MachineId}");
        await _nodeOps.ProcessStateUpdate(node);
    }

    await foreach (var scaleset in _scaleSetOps.SearchAll()) {
        await ProcessScalesets(scaleset);
    }
}
private async Async.Task <HttpResponseData> Get(HttpRequestData req) { _log.Info("Notification search"); var request = await RequestHandling.ParseUri <NotificationSearch>(req); if (!request.IsOk) { return(await _context.RequestHandling.NotOk(req, request.ErrorV, "notification search")); } var entries = request.OkV switch { { Container : null } => _context.NotificationOperations.SearchAll(), { Container : var c } => _context.NotificationOperations.SearchByRowKeys(c.Select(x => x.ContainerName)) };
public async Task<HttpResponseData> GetNsg([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/nsgOperations/nsg")] HttpRequestData req) {
    // Test hook: looks up an NSG by the `name` query parameter.
    // 404 when absent; otherwise 200 with the NSG's resource guid.
    _log.Info("get nsg");
    var query = UriExtension.GetQueryComponents(req.Url);
    var nsg = await _nsgOperations.GetNsg(query["name"]);

    if (nsg is null) {
        return req.CreateResponse(HttpStatusCode.NotFound);
    }

    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteAsJsonAsync(new { ResourceId = nsg.Data.ResourceGuid });
    return response;
}
private async Async.Task file_added(ILogTracer log, JsonDocument fileChangeEvent, bool failTaskOnTransientError) {
    // Handles a blob-created change event: the blob URL's path segments (after
    // scheme and host, i.e. Skip(3)) are split into container and blob path,
    // then forwarded to new-file notification handling.
    var url = fileChangeEvent.RootElement.GetProperty("data").GetProperty("url").GetString()!;
    var segments = url.Split("/").Skip(3).ToList();
    var container = segments[0];
    var path = string.Join('/', segments.Skip(1));

    log.Info($"file added container: {container} - path: {path}");
    await _notificationOperations.NewFiles(new Container(container), path, failTaskOnTransientError);
}
public async Async.Task ScheduleTasks() {
    // Attempts to schedule every task currently in the Waiting state.
    var waiting = await _taskOperations.SearchStates(states: new[] { TaskState.Waiting }).ToDictionaryAsync(x => x.TaskId);
    var scheduled = new HashSet<Guid>();

    foreach (var bucket in BucketTasks(waiting.Values)) {
        // Each work set holds at most MAX_TASKS_PER_SET tasks.
        foreach (var chunk in bucket.Chunk(MAX_TASKS_PER_SET)) {
            var built = await BuildWorkSet(chunk);
            if (built == null) {
                continue;
            }
            var (bucketConfig, workSet) = built.Value;

            if (await ScheduleWorkset(workSet, bucketConfig.pool, bucketConfig.count)) {
                // Work set accepted: move each member task to Scheduled.
                foreach (var workUnit in workSet.WorkUnits) {
                    var updated = await _taskOperations.SetState(waiting[workUnit.TaskId], TaskState.Scheduled);
                    scheduled.Add(updated.TaskId);
                }
            }
        }
    }

    var notReadyCount = waiting.Count - scheduled.Count;
    if (notReadyCount > 0) {
        _logTracer.Info($"tasks not ready {notReadyCount}");
    }
}
//[Function("TimerTasks")]
public async Async.Task Run([TimerTrigger("1.00:00:00")] TimerInfo myTimer) {
    // Stops expired tasks, then expired jobs, drives state updates for any
    // jobs/tasks that need work, and finally kicks the scheduler.
    await foreach (var task in _taskOperations.SearchExpired()) {
        _logger.Info($"stopping expired task. job_id:{task.JobId} task_id:{task.TaskId}");
        await _taskOperations.MarkStopping(task);
    }

    await foreach (var job in _jobOperations.SearchExpired()) {
        _logger.Info($"stopping expired job. job_id:{job.JobId }");
        await _jobOperations.Stopping(job, _taskOperations);
    }

    await foreach (var job in _jobOperations.SearchState(states: JobStateHelper.NeedsWork)) {
        _logger.Info($"update job: {job.JobId}");
        await _jobOperations.ProcessStateUpdates(job);
    }

    await foreach (var task in _taskOperations.SearchStates(states: TaskStateHelper.NeedsWork)) {
        _logger.Info($"update task: {task.TaskId}");
        await _taskOperations.ProcessStateUpdate(task);
    }

    await _scheduler.ScheduleTasks();
    await _jobOperations.StopNeverStartedJobs();
}
public async Async.Task Run([TimerTrigger("00:00:30")] TimerInfo myTimer) {
    // First pass: stop every expired repro VM, recording which ids were handled.
    // Materializing the ids into a set avoids re-enumerating (and re-issuing)
    // the SearchExpired query once per repro in the loop below, which the
    // previous `expiredVmIds.ContainsAsync(...)` over the async enumerable did.
    var expiredVmIds = new HashSet<Guid>();
    await foreach (var repro in _onefuzzContext.ReproOperations.SearchExpired()) {
        _log.Info($"stopping repro: {repro.VmId}");
        await _onefuzzContext.ReproOperations.Stopping(repro);
        expiredVmIds.Add(repro.VmId);
    }

    // Second pass: run state updates for repros needing work, skipping those
    // already processed during the expired phase.
    await foreach (var repro in _onefuzzContext.ReproOperations.SearchStates(VmStateHelper.NeedsWork)) {
        if (expiredVmIds.Contains(repro.VmId)) {
            // this VM already got processed during the expired phase
            continue;
        }
        _log.Info($"update repro: {repro.VmId}");
        await _onefuzzContext.ReproOperations.ProcessStateUpdates(repro);
    }
}
public async Task<HttpResponseData> GenericExtensions([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/extensions/genericExtensions")] HttpRequestData req) {
    // Test hook: computes the generic VM extensions for a region/OS pair.
    _log.Info("Get Generic extensions");
    var query = UriExtension.GetQueryComponents(req.Url);
    var os = Enum.Parse<Os>(query["os"]);
    // Downcast to the concrete Extensions type to reach GenericExtensions.
    var extensions = await (_extensions as Extensions)!.GenericExtensions(query["region"], os);
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteAsJsonAsync(extensions);
    return response;
}
private async Async.Task<BlobServiceClient?> GetBlobService(string accountId) {
    // Resolves a storage account's name and key, then builds a shared-key
    // BlobServiceClient for it. Returns null when the name lookup fails.
    // (Fixed unbalanced parenthesis in the log message.)
    _log.Info($"getting blob container (account_id: {accountId})");
    var (accountName, accountKey) = await _storage.GetStorageAccountNameAndKey(accountId);
    if (accountName == null) {
        _log.Error("Failed to get storage account name");
        return null;
    }
    var storageKeyCredential = new StorageSharedKeyCredential(accountName, accountKey);
    var accountUrl = GetUrl(accountName);
    return new BlobServiceClient(accountUrl, storageKeyCredential);
}
// Applies the given extension set to the named VMSS as a patch operation,
// after checking the scaleset can currently be updated.
// Returns Ok when the update was started, otherwise the CheckCanUpdate error.
public async Async.Task<OneFuzzResultVoid> UpdateExtensions(Guid name, IList<VirtualMachineScaleSetExtensionData> extensions) {
    var canUpdate = await CheckCanUpdate(name);
    if (canUpdate.IsOk) {
        _log.Info($"updating VM extensions: {name}");
        var res = GetVmssResource(name);
        var patch = new VirtualMachineScaleSetPatch();
        // NOTE(review): this assumes VirtualMachineScaleSetPatch initializes
        // VirtualMachineProfile / ExtensionProfile to non-null instances; if
        // the SDK leaves them null this throws — confirm against the SDK
        // version in use.
        foreach (var ext in extensions) {
            patch.VirtualMachineProfile.ExtensionProfile.Extensions.Add(ext);
        }
        // WaitUntil.Started: fire the update without awaiting completion.
        var _ = await res.UpdateAsync(WaitUntil.Started, patch);
        _log.Info($"VM extensions updated: {name}");
        return (OneFuzzResultVoid.Ok());
    } else {
        return (OneFuzzResultVoid.Error(canUpdate.ErrorV));
    }
}
public async Task<HttpResponseData> Get([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/instance-config")] HttpRequestData req) {
    // Test hook: returns the current instance config as JSON, or a 500 with an
    // error payload when no config could be fetched.
    _log.Info("Fetching instance config");
    var config = await _configOps.Fetch();

    if (config is null) {
        _log.Error("Instance config is null");
        var errorResponse = req.CreateResponse(HttpStatusCode.InternalServerError);
        await errorResponse.WriteAsJsonAsync(new Error(ErrorCode.INVALID_REQUEST, new[] { "Instance config is null" }));
        return errorResponse;
    }

    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(EntityConverter.ToJsonString(config));
    return response;
}
public async Async.Task UpdateConfigs(Scaleset scaleSet) {
    // Pushes updated fuzzing extension configs to a scaleset flagged for a
    // config update; skips null, halted, or already up-to-date scalesets.
    if (scaleSet == null) {
        _log.Warning("skipping update configs on scaleset, since scaleset is null");
        return;
    }
    if (scaleSet.State == ScalesetState.Halt) {
        // fixed log typo: "scalest" -> "scaleset"
        _log.Info($"{SCALESET_LOG_PREFIX} not updating configs, scaleset is set to be deleted. scaleset_id: {scaleSet.ScalesetId}");
        return;
    }
    if (!scaleSet.NeedsConfigUpdate) {
        // fixed log typo: "no needed" -> "not needed"
        _log.Verbose($"{SCALESET_LOG_PREFIX} config update not needed. scaleset_id: {scaleSet.ScalesetId}");
        return;
    }

    // fixed log typos: "scalset" -> "scaleset"
    _log.Info($"{SCALESET_LOG_PREFIX} updating scaleset configs. scaleset_id: {scaleSet.ScalesetId}");

    var pool = await _context.PoolOperations.GetByName(scaleSet.PoolName);
    if (!pool.IsOk || pool.OkV is null) {
        // Missing pool is fatal for this scaleset: mark it failed.
        _log.Error($"{SCALESET_LOG_PREFIX} unable to find pool during config update. pool:{scaleSet.PoolName}, scaleset_id:{scaleSet.ScalesetId}");
        await SetFailed(scaleSet, pool.ErrorV!);
        return;
    }

    var extensions = await _context.Extensions.FuzzExtensions(pool.OkV, scaleSet);
    var res = await _context.VmssOperations.UpdateExtensions(scaleSet.ScalesetId, extensions);
    if (!res.IsOk) {
        _log.Info($"{SCALESET_LOG_PREFIX} unable to update configs {string.Join(',', res.ErrorV.Errors!)}");
    }
}
// Incrementally tears down the components of a VM: the VM itself, then its
// NIC (dissociating any NSG first), then its IP, then any disks whose names
// share the VM's name prefix. Each call performs at most one stage and
// returns false while work remains; returns true once nothing is left.
// Callers are expected to re-invoke until it returns true.
public async Async.Task<bool> DeleteVmComponents(string name, Nsg? nsg) {
    var resourceGroup = _creds.GetBaseResourceGroup();
    _logTracer.Info($"deleting vm components {resourceGroup}:{name}");
    // NOTE(review): if GetVm is async (returns a Task), this null-check
    // compares the Task object itself and is always true — confirm GetVm is
    // synchronous here, otherwise an `await` is missing.
    if (GetVm(name) != null) {
        _logTracer.Info($"deleting vm {resourceGroup}:{name}");
        await DeleteVm(name);
        return (false);
    }
    var nic = await _ipOperations.GetPublicNic(resourceGroup, name);
    if (nic != null) {
        _logTracer.Info($"deleting nic {resourceGroup}:{name}");
        // Dissociate the NSG first; the NIC itself is deleted on a later pass.
        if (nic.Data.NetworkSecurityGroup != null && nsg != null) {
            await _nsgOperations.DissociateNic((Nsg)nsg, nic);
            return (false);
        }
        await _ipOperations.DeleteNic(resourceGroup, name);
        return (false);
    }
    if (await _ipOperations.GetIp(resourceGroup, name) != null) {
        _logTracer.Info($"deleting ip {resourceGroup}:{name}");
        await _ipOperations.DeleteIp(resourceGroup, name);
        return (false);
    }
    // Disks are matched by name prefix on the VM name.
    var disks = _diskOperations.ListDisks(resourceGroup)
        .ToAsyncEnumerable()
        .Where(disk => disk.Data.Name.StartsWith(name));
    if (await disks.AnyAsync()) {
        await foreach (var disk in disks) {
            _logTracer.Info($"deleting disk {resourceGroup}:{disk?.Data.Name}");
            await _diskOperations.DeleteDisk(resourceGroup, disk?.Data.Name !);
        }
        return (false);
    }
    // All components gone.
    return (true);
}
public async Task<HttpResponseData> SearchForward([HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "testhooks/proxyForwardOperations/search")] HttpRequestData req) {
    // Test hook: searches proxy-forward entries by any combination of optional
    // query filters; absent parameters are passed through as nulls.
    _log.Info("search proxy forward");
    var query = UriExtension.GetQueryComponents(req.Url);

    var scaleSetId = UriExtension.GetGuid("scaleSetId", query);
    var region = UriExtension.GetString("region", query);
    var machineId = UriExtension.GetGuid("machineId", query);
    var proxyId = UriExtension.GetGuid("proxyId", query);
    var dstPort = UriExtension.GetInt("dstPort", query);

    var results = await _proxyForward.SearchForward(scaleSetId, region, machineId, proxyId, dstPort).ToListAsync();
    var json = JsonSerializer.Serialize(results, EntityConverter.GetJsonSerializerOptions());
    var response = req.CreateResponse(HttpStatusCode.OK);
    await response.WriteStringAsync(json);
    return response;
}