// TODO: This method, or an abstracted version of it, should move to Speckle Core (a possible shape is sketched after this method).
        public async void SendStaggeredUpdate(bool force = false)
        {
            if (Paused && !force)
            {
                Context.NotifySpeckleFrame("client-expired", StreamId, "");
                return;
            }

            if (IsSendingUpdate)
            {
                Expired = true;
                return;
            }

            IsSendingUpdate = true;

            Context.NotifySpeckleFrame("client-is-loading", StreamId, "");

            var objs = RhinoDoc.ActiveDoc.Objects.FindByUserString("spk_" + this.StreamId, "*", false).OrderBy(obj => obj.Attributes.LayerIndex);

            Context.NotifySpeckleFrame("client-progress-message", StreamId, "Converting " + objs.Count() + " objects...");

            List <SpeckleLayer>           pLayers              = new List <SpeckleLayer>();
            List <SpeckleObject>          convertedObjects     = new List <SpeckleObject>();
            List <PayloadMultipleObjects> objectUpdatePayloads = new List <PayloadMultipleObjects>();

            long totalBucketSize   = 0;
            long currentBucketSize = 0;
            List <SpeckleObject> currentBucketObjects = new List <SpeckleObject>();
            List <SpeckleObject> allObjects           = new List <SpeckleObject>();

            int lindex = -1, count = 0, orderIndex = 0;

            foreach (RhinoObject obj in objs)
            {
                // layer list creation
                Layer layer = RhinoDoc.ActiveDoc.Layers[obj.Attributes.LayerIndex];
                if (lindex != obj.Attributes.LayerIndex)
                {
                    var spkLayer = new SpeckleLayer()
                    {
                        Name        = layer.FullPath,
                        Guid        = layer.Id.ToString(),
                        ObjectCount = 1,
                        StartIndex  = count,
                        OrderIndex  = orderIndex++,
                        Properties  = new SpeckleLayerProperties()
                        {
                            Color = new SpeckleCore.Color()
                            {
                                A = 1, Hex = System.Drawing.ColorTranslator.ToHtml(layer.Color)
                            },
                        }
                    };

                    pLayers.Add(spkLayer);
                    lindex = obj.Attributes.LayerIndex;
                }
                else
                {
                    var spkl = pLayers.FirstOrDefault(pl => pl.Name == layer.FullPath);
                    spkl.ObjectCount++;
                }

                count++;

                // object conversion
                SpeckleObject convertedObject;

                convertedObject = Converter.Serialise(obj.Geometry);
                convertedObject.ApplicationId = obj.Id.ToString();
                allObjects.Add(convertedObject);

                Context.NotifySpeckleFrame("client-progress-message", StreamId, "Converted " + count + " objects out of " + objs.Count() + ".");

                // if the object is already in the cache, swap it for a lightweight placeholder
                // and let the server resolve it in the ObjectCreateBulkAsync route
                if (Context.SpeckleObjectCache.ContainsKey(convertedObject.Hash))
                {
                    convertedObject = new SpeckleObjectPlaceholder()
                    {
                        Hash = convertedObject.Hash, DatabaseId = Context.SpeckleObjectCache[convertedObject.Hash].DatabaseId, ApplicationId = Context.SpeckleObjectCache[convertedObject.Hash].ApplicationId
                    };
                }

                // size checking & bulk object creation payloads creation
                long size = Converter.getBytes(convertedObject).Length;
                currentBucketSize += size;
                totalBucketSize   += size;
                currentBucketObjects.Add(convertedObject);

                if (currentBucketSize > 2e6)
                {
                    // a single object has pushed the current bucket past ~2 MB; objects this large are not supported yet
                    Context.NotifySpeckleFrame("client-error", StreamId, JsonConvert.SerializeObject("This stream contains a super big object. These are not supported yet :("));
                    Context.NotifySpeckleFrame("client-done-loading", StreamId, "");
                    IsSendingUpdate = false;
                    return;
                }

                if (currentBucketSize > 5e5) // cap payloads at ~500 KB; this could become user-configurable, and these functions should eventually move into Core
                {
                    Debug.WriteLine("Reached payload limit. Making a new one, current #: " + objectUpdatePayloads.Count);
                    objectUpdatePayloads.Add(new PayloadMultipleObjects()
                    {
                        Objects = currentBucketObjects.ToArray()
                    });
                    currentBucketObjects = new List <SpeckleObject>();
                    currentBucketSize    = 0;
                }

                // catch overflows early
                if (totalBucketSize >= 50e6)
                {
                    Context.NotifySpeckleFrame("client-error", StreamId, JsonConvert.SerializeObject("This is a humongous update, in the range of ~50mb. For now, create more streams instead of just one massive one! Updates will be faster and snappier, and you can combine them back together at the other end easier. " + totalBucketSize / 1000 + "(kb)"));
                    IsSendingUpdate = false;
                    Context.NotifySpeckleFrame("client-done-loading", StreamId, "");
                    return;
                }
            }

            // last bucket
            if (currentBucketObjects.Count > 0)
            {
                objectUpdatePayloads.Add(new PayloadMultipleObjects()
                {
                    Objects = currentBucketObjects.ToArray()
                });
            }

            Debug.WriteLine("Finished, payload object update count is: " + objectUpdatePayloads.Count + " total bucket size is (kb) " + totalBucketSize / 1000);

            if (objectUpdatePayloads.Count > 100 || totalBucketSize >= 50e6)
            {
                // more than 100 payloads or over ~50 MB in total: too large for a single update
                Context.NotifySpeckleFrame("client-error", StreamId, JsonConvert.SerializeObject("This is a humongous update, in the range of ~50mb. For now, create more streams instead of just one massive one! Updates will be faster and snappier, and you can combine them back together at the other end easier. " + totalBucketSize / 1000 + "(kb)"));
                IsSendingUpdate = false;
                Context.NotifySpeckleFrame("client-done-loading", StreamId, "");
                return;
            }

            // create bulk object creation tasks
            int k = 0;
            List <ResponsePostObjects> responses = new List <ResponsePostObjects>();

            foreach (var payload in objectUpdatePayloads)
            {
                Context.NotifySpeckleFrame("client-progress-message", StreamId, String.Format("Sending payload {0} out of {1}", k++, objectUpdatePayloads.Count));
                try
                {
                    responses.Add(await Client.ObjectCreateBulkAsync(payload));
                }
                catch (Exception err)
                {
                    Context.NotifySpeckleFrame("client-error", Client.Stream.StreamId, JsonConvert.SerializeObject(err.Message));
                    Context.NotifySpeckleFrame("client-done-loading", StreamId, "");
                    IsSendingUpdate = false;
                    return;
                }
            }

            Context.NotifySpeckleFrame("client-progress-message", StreamId, "Updating stream...");

            // finalise layer creation
            foreach (var layer in pLayers)
            {
                layer.Topology = "0-" + layer.ObjectCount + " ";
            }

            // create placeholders for stream update payload
            List <SpeckleObjectPlaceholder> placeholders = new List <SpeckleObjectPlaceholder>();
            int m = 0;

            foreach (var myResponse in responses)
            {
                foreach (string dbId in myResponse.Objects)
                {
                    placeholders.Add(new SpeckleObjectPlaceholder()
                    {
                        DatabaseId = dbId, ApplicationId = allObjects[m++].ApplicationId
                    });
                }
            }

            // create stream update payload
            PayloadStreamUpdate streamUpdatePayload = new PayloadStreamUpdate();

            streamUpdatePayload.Layers  = pLayers;
            streamUpdatePayload.Objects = placeholders;
            streamUpdatePayload.Name    = Client.Stream.Name;

            // set some base properties (will be overwritten)
            var baseProps = new Dictionary <string, object>();

            baseProps["units"]                 = RhinoDoc.ActiveDoc.ModelUnitSystem.ToString();
            baseProps["tolerance"]             = RhinoDoc.ActiveDoc.ModelAbsoluteTolerance;
            baseProps["angleTolerance"]        = RhinoDoc.ActiveDoc.ModelAngleToleranceRadians;
            streamUpdatePayload.BaseProperties = baseProps;

            // push it to the server yo!
            ResponseStreamUpdate response = null;

            try
            {
                response = await Client.StreamUpdateAsync(streamUpdatePayload, Client.Stream.StreamId);
            }
            catch (Exception err)
            {
                Context.NotifySpeckleFrame("client-error", Client.Stream.StreamId, JsonConvert.SerializeObject(err.Message));
                IsSendingUpdate = false;
                return;
            }

            // put the objects in the cache
            int l = 0;

            foreach (var obj in streamUpdatePayload.Objects)
            {
                obj.DatabaseId = response.Objects[l];
                Context.SpeckleObjectCache[allObjects[l].Hash] = placeholders[l];
                l++;
            }

            // update local stream state and emit events
            Client.Stream.Layers  = streamUpdatePayload.Layers.ToList();
            Client.Stream.Objects = streamUpdatePayload.Objects.Select(o => o.ApplicationId).ToList();

            Context.NotifySpeckleFrame("client-metadata-update", StreamId, Client.Stream.ToJson());
            Context.NotifySpeckleFrame("client-done-loading", StreamId, "");

            Client.BroadcastMessage(new { eventType = "update-global" });

            IsSendingUpdate = false;
            if (Expired)
            {
                DataSender.Start();
            }
            Expired = false;
        }
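
        // The size-based bucketing above is duplicated in DataSender_Elapsed below.
        // A minimal sketch of how it could be abstracted into Speckle Core, as the
        // TODO suggests; the helper name and the configurable byte limit are
        // assumptions, not an existing Core API.
        public static List<PayloadMultipleObjects> CreateObjectPayloads(IEnumerable<SpeckleObject> objects, long maxPayloadBytes = 500000)
        {
            var payloads    = new List<PayloadMultipleObjects>();
            var bucket      = new List<SpeckleObject>();
            long bucketSize = 0;

            foreach (var obj in objects)
            {
                bucket.Add(obj);
                bucketSize += Converter.getBytes(obj).Length;

                // close the current bucket once it exceeds the payload limit
                if (bucketSize > maxPayloadBytes)
                {
                    payloads.Add(new PayloadMultipleObjects() { Objects = bucket.ToArray() });
                    bucket     = new List<SpeckleObject>();
                    bucketSize = 0;
                }
            }

            // flush the last, partially filled bucket
            if (bucket.Count > 0)
            {
                payloads.Add(new PayloadMultipleObjects() { Objects = bucket.ToArray() });
            }

            return payloads;
        }
        // usage (hypothetical): var objectUpdatePayloads = CreateObjectPayloads(allObjects);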
        private void DataSender_Elapsed(object sender, ElapsedEventArgs e)
        {
            if (MetadataSender.Enabled)
            {
                // the metadata sender is still busy; restart this timer so the data update runs on the next tick
                DataSender.Start();
                return;
            }

            this.Message = String.Format("Converting {0} \n objects", BucketObjects.Count);

            var convertedObjects = Converter.Serialise(BucketObjects).Select(obj =>
            {
                if (ObjectCache.ContainsKey(obj.Hash))
                {
                    // the object was sent before: swap in a lightweight placeholder
                    return new SpeckleObjectPlaceholder()
                    {
                        Hash = obj.Hash, DatabaseId = ObjectCache[obj.Hash].DatabaseId
                    };
                }
                return obj;
            }).ToList();

            this.Message = String.Format("Creating payloads");

            long totalBucketSize   = 0;
            long currentBucketSize = 0;
            List <PayloadMultipleObjects> objectUpdatePayloads = new List <PayloadMultipleObjects>();
            List <SpeckleObject>          currentBucketObjects = new List <SpeckleObject>();
            List <SpeckleObject>          allObjects           = new List <SpeckleObject>();

            foreach (SpeckleObject convertedObject in convertedObjects)
            {
                long size = Converter.getBytes(convertedObject).Length;
                currentBucketSize += size;
                totalBucketSize   += size;
                currentBucketObjects.Add(convertedObject);

                if (currentBucketSize > 5e5) // cap payloads at ~500 KB; this could become user-configurable, and these functions should eventually move into Core
                {
                    Debug.WriteLine("Reached payload limit. Making a new one, current #: " + objectUpdatePayloads.Count);
                    objectUpdatePayloads.Add(new PayloadMultipleObjects()
                    {
                        Objects = currentBucketObjects.ToArray()
                    });
                    currentBucketObjects = new List <SpeckleObject>();
                    currentBucketSize    = 0;
                }
            }

            if (currentBucketObjects.Count > 0)
            {
                objectUpdatePayloads.Add(new PayloadMultipleObjects()
                {
                    Objects = currentBucketObjects.ToArray()
                });
            }

            Debug.WriteLine("Finished, payload object update count is: " + objectUpdatePayloads.Count + " total bucket size is (kb) " + totalBucketSize / 1000);

            if (objectUpdatePayloads.Count > 100)
            {
                this.AddRuntimeMessage(GH_RuntimeMessageLevel.Error, "This is a humongous update, in the range of ~50mb. For now, create more streams instead of just one massive one! Updates will be faster and snappier, and you can combine them back together at the other end easier.");
                return;
            }

            int k = 0;
            List <ResponsePostObjects> responses = new List <ResponsePostObjects>();

            foreach (var payload in objectUpdatePayloads)
            {
                this.Message = String.Format("Sending payload\n{0} / {1}", k++, objectUpdatePayloads.Count);
                responses.Add(mySender.ObjectCreateBulkAsync(payload).GetAwaiter().GetResult());
            }

            this.Message = "Updating stream...";

            // create placeholders for stream update payload
            List <SpeckleObjectPlaceholder> placeholders = new List <SpeckleObjectPlaceholder>();

            foreach (var myResponse in responses)
            {
                foreach (string dbId in myResponse.Objects)
                {
                    placeholders.Add(new SpeckleObjectPlaceholder()
                    {
                        DatabaseId = dbId
                    });
                }
            }

            PayloadStreamUpdate streamUpdatePayload = new PayloadStreamUpdate();

            streamUpdatePayload.Layers  = BucketLayers;
            streamUpdatePayload.Name    = BucketName;
            streamUpdatePayload.Objects = placeholders;

            // set some base properties (will be overwritten)
            var baseProps = new Dictionary <string, object>();

            baseProps["units"]                 = Rhino.RhinoDoc.ActiveDoc.ModelUnitSystem.ToString();
            baseProps["tolerance"]             = Rhino.RhinoDoc.ActiveDoc.ModelAbsoluteTolerance;
            baseProps["angleTolerance"]        = Rhino.RhinoDoc.ActiveDoc.ModelAngleToleranceRadians;
            streamUpdatePayload.BaseProperties = baseProps;

            var response = mySender.StreamUpdate(streamUpdatePayload, mySender.StreamId);

            mySender.BroadcastMessage(new { eventType = "update-global" });

            // put the objects in the cache
            int l = 0;

            foreach (var obj in streamUpdatePayload.Objects)
            {
                obj.DatabaseId = response.Objects[l];
                ObjectCache[convertedObjects[l].Hash] = placeholders[l];
                l++;
            }

            AddRuntimeMessage(GH_RuntimeMessageLevel.Remark, "Data sent at " + DateTime.Now);
            Message = "Data sent\n@" + DateTime.Now.ToString("hh:mm:ss");
        }
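
        // A hedged alternative to the blocking GetAwaiter().GetResult() call in
        // DataSender_Elapsed above: awaiting the requests sequentially frees the
        // calling thread while keeping payload order. Requires System.Threading.Tasks;
        // the helper name, the progress callback, and the assumption that the sender
        // is a SpeckleApiClient are mine, not part of the original code.
        private static async Task<List<ResponsePostObjects>> SendPayloadsAsync(SpeckleApiClient client, IReadOnlyList<PayloadMultipleObjects> payloads, Action<string> reportProgress)
        {
            var responses = new List<ResponsePostObjects>();

            for (int i = 0; i < payloads.Count; i++)
            {
                reportProgress(String.Format("Sending payload\n{0} / {1}", i + 1, payloads.Count));
                // sequential awaits avoid flooding the server without blocking the caller
                responses.Add(await client.ObjectCreateBulkAsync(payloads[i]));
            }

            return responses;
        }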