コード例 #1
0
        public static void Main(string[] args)
        {
            // Annotate a GCS-hosted video with label detection and print every
            // segment-level label, its categories, and its segments.
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = "gs://demomaker/cat.mp4",
                Features = { Feature.LabelDetection }
            };
            // AnnotateVideo is a long-running operation; block until it finishes.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (var label in annotationResult.SegmentLabelAnnotations)
                {
                    Console.WriteLine($"Video label: {label.Entity.Description}");
                    foreach (var category in label.CategoryEntities)
                    {
                        Console.WriteLine($"Video label category: {category.Description}");
                    }
                    foreach (var seg in label.Segments)
                    {
                        Console.Write("Segment location: ");
                        Console.Write(seg.Segment.StartTimeOffset);
                        Console.Write(":");
                        Console.WriteLine(seg.Segment.EndTimeOffset);
                        Console.WriteLine($"Confidence: {seg.Confidence}");
                    }
                }
            }
        }
コード例 #2
0
        public void AnnotateVideo()
        {
            // Sample: AnnotateVideo
            // Additional: AnnotateVideo(string,IEnumerable<Feature>,VideoContext,string,string,CallSettings)
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = "gs://cloudmleap/video/next/gbikes_dinosaur.mp4",
                Features = { Feature.LabelDetection }
            };
            // Start the long-running operation and block until it completes.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            var annotationResult = completed.Result.AnnotationResults[0];

            // Print every label with the segments in which it was detected.
            foreach (var labelAnnotation in annotationResult.LabelAnnotations)
            {
                Console.WriteLine($"Label: {labelAnnotation.Description}");
                Console.WriteLine("Locations:");
                foreach (var loc in labelAnnotation.Locations)
                {
                    Console.WriteLine($"  {loc.Segment.StartTimeOffset}-{loc.Segment.EndTimeOffset}: {loc.Confidence}");
                }
            }
            // End sample

            Assert.True(annotationResult.LabelAnnotations.Any(lab => lab.Description == "Dinosaur"));
        }
コード例 #3
0
        // [END analyze_labels_gcs]

        // [START analyze_faces]
        // Runs face detection on a GCS-hosted video and prints, for each face,
        // the time segments in which it appears (faces labelled 'A', 'B', ...).
        public static object AnalyzeFaces(string uri)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.FaceDetection }
            };
            // Wait for the long-running annotation operation to finish.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            var faceLabel = 'A';
            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (var face in annotationResult.FaceAnnotations)
                {
                    Console.WriteLine("Face {0} seen at offsets:", faceLabel);
                    foreach (var seg in face.Segments)
                    {
                        Console.WriteLine("{0}-{1}", seg.StartTimeOffset, seg.EndTimeOffset);
                    }
                    faceLabel++;
                }
            }
            return 0;
        }
        /// <summary>Snippet for AnnotateVideo</summary>
        public void AnnotateVideo()
        {
            // Snippet: AnnotateVideo(string,IEnumerable<Feature>,CallSettings)
            // Create the client and the request arguments.
            VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.Create();
            string inputUri = "gs://demomaker/cat.mp4";
            IEnumerable<Feature> features = new[] { Feature.LabelDetection };

            // Start the long-running annotation operation.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation =
                client.AnnotateVideo(inputUri, features);

            // Block until the operation finishes, then read its result.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completed =
                operation.PollUntilCompleted();
            AnnotateVideoResponse result = completed.Result;

            // Alternatively, keep only the operation name and resume polling
            // later by name.
            string operationName = operation.Name;
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> resumed =
                client.PollOnceAnnotateVideo(operationName);
            if (resumed.IsCompleted)
            {
                // The operation finished; its result is now available.
                AnnotateVideoResponse resumedResult = resumed.Result;
            }
            // End snippet
        }
コード例 #5
0
        static void Main()
        {
            Console.WriteLine("Service is running.");

            // Point the Google client libraries at the service-account key file,
            // unless the credentials variable is already set in the environment.
            const string envName = "GOOGLE_APPLICATION_CREDENTIALS";
            const string envValue = @"..\..\..\Credentials\json1.json";
            if (Environment.GetEnvironmentVariable(envName) == null)
            {
                Environment.SetEnvironmentVariable(envName, envValue);
            }

            // Upload the local video bytes directly and request label detection.
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputContent = Google.Protobuf.ByteString.CopyFrom(File.ReadAllBytes(@"..\..\..\Videos\InputVideo\testInput.mp4")),
                Features = { Feature.LabelDetection }
            };
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            // Find when a "person" appears, then cut that span out of the source
            // video into the output file.
            TimeDetails timeDetails = ConsumeJsonFile.ReturnTimeOffsets("person", completed.Result.AnnotationResults);
            string duration = (timeDetails.EndTime - timeDetails.StartTime).ToString();
            VideoEditor.CutVideo(@"..\..\..\Videos\InputVideo\testInput.mp4", timeDetails.StartTime.ToString(), duration, @"..\..\..\Videos\OutputVideo\Output.mp4");

            Console.WriteLine("Video is edited. Press key to exit.");
            Console.ReadKey();
        }
コード例 #6
0
        public void AnnotateVideo()
        {
            // Snippet: AnnotateVideo(string,IEnumerable<Feature>,VideoContext,string,string,CallSettings)
            // Create the client and placeholder request arguments.
            VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.Create();
            string inputUri = "";
            IEnumerable<Feature> features = new List<Feature>();
            VideoContext videoContext = new VideoContext();
            string outputUri = "";
            string locationId = "";

            // Start the long-running annotation operation.
            Operation<AnnotateVideoResponse> operation =
                client.AnnotateVideo(inputUri, features, videoContext, outputUri, locationId);

            // Block until the operation finishes, then read its result.
            Operation<AnnotateVideoResponse> completed = operation.PollUntilCompleted();
            AnnotateVideoResponse result = completed.Result;

            // Alternatively, keep only the operation name and resume polling
            // later by name.
            string operationName = operation.Name;
            Operation<AnnotateVideoResponse> resumed =
                client.PollOnceAnnotateVideo(operationName);
            if (resumed.IsCompleted)
            {
                // The operation finished; its result is now available.
                AnnotateVideoResponse resumedResult = resumed.Result;
            }
            // End snippet
        }
        public void AnnotateVideo()
        {
            // Sample: AnnotateVideo
            // Additional: AnnotateVideo(string, IEnumerable<Feature>, CallSettings)
            VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.Create();
            // Kick off label detection on a GCS-hosted video and wait for it.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation = client.AnnotateVideo(
                "gs://cloud-samples-data/video/gbikes_dinosaur.mp4",
                new[] { Feature.LabelDetection });
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completed = operation.PollUntilCompleted();

            VideoAnnotationResults result = completed.Result.AnnotationResults[0];

            // Print every shot-level label with its segments and confidences.
            foreach (LabelAnnotation label in result.ShotLabelAnnotations)
            {
                Console.WriteLine($"Label entity: {label.Entity.Description}");
                Console.WriteLine("Frames:");
                foreach (LabelSegment seg in label.Segments)
                {
                    Console.WriteLine($"  {seg.Segment.StartTimeOffset}-{seg.Segment.EndTimeOffset}: {seg.Confidence}");
                }
            }
            // End sample

            Assert.Contains(result.ShotLabelAnnotations, lab => lab.Entity.Description == "dinosaur");
        }
コード例 #8
0
        // [END analyze_labels_gcs]

        // [START analyze_safesearch_gcs]
        // Runs SafeSearch detection on a GCS-hosted video and prints the
        // likelihood ratings for each annotated time offset.
        public static object AnalyzeSafeSearchGcs(string uri)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.SafeSearchDetection }
            };
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (SafeSearchAnnotation annotation in annotationResult.SafeSearchAnnotations)
                {
                    Console.WriteLine("Time Offset: {0}", annotation.TimeOffset);
                    Console.WriteLine("Adult: {0}", annotation.Adult);
                    Console.WriteLine("Medical: {0}", annotation.Medical);
                    Console.WriteLine("Racy: {0}", annotation.Racy);
                    Console.WriteLine("Spoof: {0}", annotation.Spoof);
                    Console.WriteLine("Violent: {0}", annotation.Violent);
                    Console.WriteLine();
                }
            }
            return 0;
        }
コード例 #9
0
        /// <summary>Snippet for AnnotateVideoAsync</summary>
        public async Task AnnotateVideoAsync()
        {
            // Snippet: AnnotateVideoAsync(string, IEnumerable<Feature>, CallSettings)
            // Additional: AnnotateVideoAsync(string, IEnumerable<Feature>, CancellationToken)
            // Create the client and placeholder request arguments.
            VideoIntelligenceServiceClient client = await VideoIntelligenceServiceClient.CreateAsync();
            string inputUri = "";
            IEnumerable<Feature> features = new Feature[] { Feature.Unspecified };

            // Start the long-running annotation operation.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation = await client.AnnotateVideoAsync(inputUri, features);

            // Await completion, then read the result.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completed = await operation.PollUntilCompletedAsync();
            AnnotateVideoResponse result = completed.Result;

            // Alternatively, keep only the operation name and resume polling
            // later by name.
            string operationName = operation.Name;
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> resumed = await client.PollOnceAnnotateVideoAsync(operationName);
            if (resumed.IsCompleted)
            {
                // The operation finished; its result is now available.
                AnnotateVideoResponse resumedResult = resumed.Result;
            }
            // End snippet
        }
コード例 #10
0
        public static int Main(string[] args)
        {
            // Smoke test: run label detection on a GCS-hosted video and print
            // the raw result.
            VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.Create();
            string inputUri = "gs://demomaker/cat.mp4";
            IEnumerable<Feature> features = new[] { Feature.LabelDetection };

            // Call the API method; it returns a long-running operation.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation = client.AnnotateVideo(inputUri, features);

            // Poll the operation until it's complete; the returned operation
            // carries the final result (or error).
            operation = operation.PollUntilCompleted();
            Console.WriteLine(operation.Result);

            Console.WriteLine("Smoke test passed OK");
            return 0;
        }
コード例 #11
0
        // [END video_object_tracking_gcs]

        // [START video_object_tracking]
        /// <summary>
        /// Runs object tracking on a local video file and prints details of the
        /// first tracked object: entity, segment, confidence, and the bounding
        /// box of the segment's first frame.
        /// </summary>
        /// <param name="filePath">Path to a local video file.</param>
        /// <returns>0 on success.</returns>
        public static object TrackObject(string filePath)
        {
            var client  = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputContent = Google.Protobuf.ByteString.CopyFrom(File.ReadAllBytes(filePath)),
                Features     = { Feature.ObjectTracking },
                // It is recommended to use location_id as 'us-east1' for the
                // best latency due to different types of processors used in
                // this region and others.
                LocationId = "us-east1"
            };

            Console.WriteLine("\nProcessing video for object annotations.");
            var op = client.AnnotateVideo(request).PollUntilCompleted();

            Console.WriteLine("\nFinished processing.\n");

            // Retrieve first result because a single video was processed.
            var objectAnnotations = op.Result.AnnotationResults[0]
                                    .ObjectAnnotations;

            // Get only the first annotation for demo purposes.
            var objAnnotation = objectAnnotations[0];

            Console.WriteLine(
                $"Entity description: {objAnnotation.Entity.Description}");

            // Fix: proto3 string fields are never null in the C# runtime (they
            // default to ""), so the previous `!= null` check was always true.
            // Test for a non-empty id instead.
            if (!string.IsNullOrEmpty(objAnnotation.Entity.EntityId))
            {
                Console.WriteLine(
                    $"Entity id: {objAnnotation.Entity.EntityId}");
            }

            Console.Write("Segment: ");
            // Seconds + Nanos/1e9 converts the protobuf Duration to seconds.
            Console.WriteLine(
                String.Format("{0}s to {1}s",
                              objAnnotation.Segment.StartTimeOffset.Seconds +
                              objAnnotation.Segment.StartTimeOffset.Nanos / 1e9,
                              objAnnotation.Segment.EndTimeOffset.Seconds +
                              objAnnotation.Segment.EndTimeOffset.Nanos / 1e9));

            Console.WriteLine($"Confidence: {objAnnotation.Confidence}");

            // Here we print only the bounding box of the first frame in this segment.
            var frame = objAnnotation.Frames[0];
            var box   = frame.NormalizedBoundingBox;

            Console.WriteLine(
                String.Format("Time offset of the first frame: {0}s",
                              frame.TimeOffset.Seconds +
                              frame.TimeOffset.Nanos / 1e9));
            Console.WriteLine("Bounding box positions:");
            Console.WriteLine($"\tleft   : {box.Left}");
            Console.WriteLine($"\ttop    : {box.Top}");
            Console.WriteLine($"\tright  : {box.Right}");
            Console.WriteLine($"\tbottom : {box.Bottom}");

            return 0;
        }
        /// <summary>Snippet for AnnotateVideo</summary>
        public void AnnotateVideo_RequestObject()
        {
            // Snippet: AnnotateVideo(AnnotateVideoRequest,CallSettings)
            // Create the client and an empty request object.
            VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.Create();
            AnnotateVideoRequest request = new AnnotateVideoRequest();

            // Start the long-running annotation operation.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation =
                client.AnnotateVideo(request);

            // Block until the operation finishes, then read its result.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completed =
                operation.PollUntilCompleted();
            AnnotateVideoResponse result = completed.Result;

            // Alternatively, keep only the operation name and resume polling
            // later by name.
            string operationName = operation.Name;
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> resumed =
                client.PollOnceAnnotateVideo(operationName);
            if (resumed.IsCompleted)
            {
                // The operation finished; its result is now available.
                AnnotateVideoResponse resumedResult = resumed.Result;
            }
            // End snippet
        }
コード例 #13
0
        // [END video_analyze_explicit_content]

        // [START video_speech_transcription_gcs]
        // Transcribes speech in a GCS-hosted video (US English, automatic
        // punctuation) and prints each alternative with word-level timings.
        public static object TranscribeVideo(string uri)
        {
            Console.WriteLine("Processing video for speech transcription.");

            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.SpeechTranscription },
                VideoContext = new VideoContext
                {
                    SpeechTranscriptionConfig = new SpeechTranscriptionConfig
                    {
                        LanguageCode = "en-US",
                        EnableAutomaticPunctuation = true
                    }
                },
            };
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            // There is only one annotation result since only one video is processed.
            var annotationResult = completed.Result.AnnotationResults[0];

            foreach (var transcription in annotationResult.SpeechTranscriptions)
            {
                // The number of alternatives per transcription is limited by
                // SpeechTranscriptionConfig.MaxAlternatives; each alternative is
                // a possible transcription with its own confidence score.
                foreach (var alternative in transcription.Alternatives)
                {
                    Console.WriteLine("Alternative level information:");
                    Console.WriteLine($"Transcript: {alternative.Transcript}");
                    Console.WriteLine($"Confidence: {alternative.Confidence}");

                    foreach (var wordInfo in alternative.Words)
                    {
                        Console.WriteLine($"\t{wordInfo.StartTime} - " +
                                          $"{wordInfo.EndTime}:" +
                                          $"{wordInfo.Word}");
                    }
                }
            }

            return 0;
        }
コード例 #14
0
        // [END video_detect_text_gcs]

        // [START video_detect_text]
        /// <summary>
        /// Runs text detection (OCR) on a local video file and prints the first
        /// detected text annotation: the text, the time range and confidence of
        /// its first segment, and the rotated bounding box of that segment's
        /// first frame.
        /// </summary>
        /// <param name="filePath">Path to a local video file.</param>
        /// <returns>0 on success.</returns>
        public static object DetectText(string filePath)
        {
            var client  = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputContent = Google.Protobuf.ByteString.CopyFrom(File.ReadAllBytes(filePath)),
                Features     = { Feature.TextDetection },
            };

            Console.WriteLine("\nProcessing video for text detection.");
            var op = client.AnnotateVideo(request).PollUntilCompleted();

            // Retrieve the first result because only one video was processed.
            var annotationResults = op.Result.AnnotationResults[0];

            // Get only the first result.
            var textAnnotation = annotationResults.TextAnnotations[0];

            Console.WriteLine($"\nText: {textAnnotation.Text}");

            // Get the first text segment.
            var textSegment = textAnnotation.Segments[0];
            var startTime   = textSegment.Segment.StartTimeOffset;
            var endTime     = textSegment.Segment.EndTimeOffset;

            Console.Write(
                $"Start time: {startTime.Seconds + startTime.Nanos / 1e9 }, ");
            Console.WriteLine(
                $"End time: {endTime.Seconds + endTime.Nanos / 1e9 }");

            Console.WriteLine($"Confidence: {textSegment.Confidence}");

            // Show the result for the first frame in this segment.
            var frame      = textSegment.Frames[0];
            var timeOffset = frame.TimeOffset;

            Console.Write("Time offset for the first frame: ");
            // Fix: nanoseconds must be DIVIDED by 1e9 (not multiplied) to
            // convert them to seconds, matching the start/end times above.
            Console.WriteLine(timeOffset.Seconds + timeOffset.Nanos / 1e9);
            Console.WriteLine("Rotated Bounding Box Vertices:");
            foreach (var vertex in frame.RotatedBoundingBox.Vertices)
            {
                Console.WriteLine(
                    $"\tVertex x: {vertex.X}, Vertex.y: {vertex.Y}");
            }
            return 0;
        }
コード例 #15
0
        // [END video_analyze_shots]

        // [START video_analyze_labels]
        // Runs label detection on a local video file and prints labels at the
        // video, shot, and frame level via the PrintLabels helper.
        public static object AnalyzeLabels(string path)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputContent = Google.Protobuf.ByteString.CopyFrom(File.ReadAllBytes(path)),
                Features = { Feature.LabelDetection }
            };
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                PrintLabels("Video", annotationResult.SegmentLabelAnnotations);
                PrintLabels("Shot", annotationResult.ShotLabelAnnotations);
                PrintLabels("Frame", annotationResult.FrameLabelAnnotations);
            }
            return 0;
        }
コード例 #16
0
        // [END video_analyze_labels]

        // [START video_analyze_labels_gcs]
        // Runs label detection on a GCS-hosted video and prints labels at the
        // video, shot, and frame level via the PrintLabels helper.
        public static object AnalyzeLabelsGcs(string uri)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.LabelDetection }
            };
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                PrintLabels("Video", annotationResult.SegmentLabelAnnotations);
                PrintLabels("Shot", annotationResult.ShotLabelAnnotations);
                PrintLabels("Frame", annotationResult.FrameLabelAnnotations);
            }
            return 0;
        }
コード例 #17
0
        static void Main(string[] args)
        {
            // Annotate a video with label detection and print the labels found
            // at video, shot, and frame level.
            var client  = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                //InputUri = "gs://cloudmleap/video/next/gbikes_dinosaur.mp4",
                // NOTE(review): InputUri appears to expect a Cloud Storage
                // ("gs://...") URI; a local path like this is unlikely to be
                // accepted by the service — confirm, or switch to InputContent.
                InputUri = "~/home/rowbottom_nathan/NMTexample.mp4",
                Features = { Feature.LabelDetection }
            };
            // Blocks until the long-running annotation operation completes.
            var op = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var result in op.Result.AnnotationResults)
            {
                // PrintLabels is a helper defined elsewhere in this file.
                PrintLabels("Video", result.SegmentLabelAnnotations);
                PrintLabels("Shot", result.ShotLabelAnnotations);
                PrintLabels("Frame", result.FrameLabelAnnotations);
            }
        }
コード例 #18
0
        public static void Main(string[] args)
        {
            // Run label detection on a GCS-hosted video and print each label.
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = "gs://demomaker/cat.mp4",
                Features = { Feature.LabelDetection }
            };
            // Block until the long-running operation completes.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (var label in annotationResult.LabelAnnotations)
                {
                    Console.Out.WriteLine(label.Description);
                }
            }
        }
コード例 #19
0
        // [END analyze_shots]

        // [START analyze_labels_gcs]
        // Runs label detection on a GCS-hosted video and prints each label.
        public static object AnalyzeLabels(string uri)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.LabelDetection }
            };
            // Block until the long-running operation completes.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (var label in annotationResult.LabelAnnotations)
                {
                    Console.WriteLine(label.Description);
                }
            }
            return 0;
        }
コード例 #20
0
        /// <summary>Snippet for AnnotateVideoAsync</summary>
        public async Task AnnotateVideoRequestObjectAsync()
        {
            // Snippet: AnnotateVideoAsync(AnnotateVideoRequest, CallSettings)
            // Additional: AnnotateVideoAsync(AnnotateVideoRequest, CancellationToken)
            // Create the client and a request with placeholder values.
            VideoIntelligenceServiceClient client = await VideoIntelligenceServiceClient.CreateAsync();
            AnnotateVideoRequest request = new AnnotateVideoRequest
            {
                InputUri = "",
                Features = { Feature.Unspecified },
                VideoContext = new VideoContext(),
                OutputUri = "",
                LocationId = "",
                InputContent = ByteString.Empty,
            };

            // Start the long-running annotation operation.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation = await client.AnnotateVideoAsync(request);

            // Await completion, then read the result.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completed = await operation.PollUntilCompletedAsync();
            AnnotateVideoResponse result = completed.Result;

            // Alternatively, keep only the operation name and resume polling
            // later by name.
            string operationName = operation.Name;
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> resumed = await client.PollOnceAnnotateVideoAsync(operationName);
            if (resumed.IsCompleted)
            {
                // The operation finished; its result is now available.
                AnnotateVideoResponse resumedResult = resumed.Result;
            }
            // End snippet
        }
コード例 #21
0
File: GoogleVision.cs  Project: WinTenDev/ZiziBot.NET
    private static ImageAnnotatorClient MakeClient()
    {
        // Resolve the service-account credentials path from bot settings.
        var credPath = BotSettings.GoogleCloudCredentialsPath.SanitizeSlash();

        Log.Information("Instantiates a client, cred {CredPath}", credPath);

        // Side effect: also (re)builds the shared Video Intelligence client
        // stored in the VideoIntelligenceService field with the same credentials.
        VideoIntelligenceService = new VideoIntelligenceServiceClientBuilder
        {
            CredentialsPath = credPath
        }.Build();

        var imageClient = new ImageAnnotatorClientBuilder
        {
            CredentialsPath = credPath
        }.Build();

        return imageClient;
    }
コード例 #22
0
        // [START analyze_shots]
        // Runs shot-change detection on a GCS-hosted video and prints each
        // shot's start and end time offsets.
        public static object AnalyzeShots(string uri)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.ShotChangeDetection }
            };
            // Block until the long-running operation completes.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (var shot in annotationResult.ShotAnnotations)
                {
                    Console.Out.WriteLine("Start Time Offset: {0}\tEnd Time Offset: {1}",
                                          shot.StartTimeOffset, shot.EndTimeOffset);
                }
            }
            return 0;
        }
コード例 #23
0
        // [END video_analyze_labels]
        // [END video_analyze_labels_gcs]

        // [START video_analyze_explicit_content]
        // Runs explicit-content detection on a GCS-hosted video and prints the
        // pornography likelihood for each analysed frame.
        public static object AnalyzeExplicitContentGcs(string uri)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = uri,
                Features = { Feature.ExplicitContentDetection }
            };
            // Block until the long-running operation completes.
            var completed = client.AnnotateVideo(request).PollUntilCompleted();

            foreach (var annotationResult in completed.Result.AnnotationResults)
            {
                foreach (var frame in annotationResult.ExplicitAnnotation.Frames)
                {
                    Console.WriteLine("Time Offset: {0}", frame.TimeOffset);
                    Console.WriteLine("Pornography Likelihood: {0}", frame.PornographyLikelihood);
                    Console.WriteLine();
                }
            }
            return 0;
        }
        /// <summary>Snippet for AnnotateVideoAsync</summary>
        public async Task AnnotateVideoAsync_RequestObject()
        {
            // Snippet: AnnotateVideoAsync(AnnotateVideoRequest,CallSettings)
            // Create the client and a label-detection request for a GCS video.
            VideoIntelligenceServiceClient client = await VideoIntelligenceServiceClient.CreateAsync();
            AnnotateVideoRequest request = new AnnotateVideoRequest
            {
                InputUri = "gs://demomaker/cat.mp4",
                Features = { Feature.LabelDetection },
            };

            // Start the long-running annotation operation.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation =
                await client.AnnotateVideoAsync(request);

            // Await completion, then read the result.
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completed =
                await operation.PollUntilCompletedAsync();
            AnnotateVideoResponse result = completed.Result;

            // Alternatively, keep only the operation name and resume polling
            // later by name.
            string operationName = operation.Name;
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> resumed =
                await client.PollOnceAnnotateVideoAsync(operationName);
            if (resumed.IsCompleted)
            {
                // The operation finished; its result is now available.
                AnnotateVideoResponse resumedResult = resumed.Result;
            }
            // End snippet
        }
        public void SpeechTranscription()
        {
            // Sample: SpeechTranscription
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest
            {
                InputUri = "gs://cloud-samples-data/video/googlework_short.mp4",
                Features = { Feature.SpeechTranscription },
                VideoContext = new VideoContext
                {
                    SpeechTranscriptionConfig = new SpeechTranscriptionConfig
                    {
                        LanguageCode = "en-US",
                        EnableAutomaticPunctuation = true
                    }
                }
            };
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation = client.AnnotateVideo(request);
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completedOperation = operation.PollUntilCompleted();

            VideoAnnotationResults firstResult = completedOperation.Result.AnnotationResults[0];

            foreach (SpeechTranscription transcription in firstResult.SpeechTranscriptions)
            {
                Console.WriteLine($"Language code: {transcription.LanguageCode}");
                Console.WriteLine("Alternatives:");
                foreach (SpeechRecognitionAlternative alternative in transcription.Alternatives)
                {
                    Console.WriteLine($"({alternative.Confidence}) {alternative.Transcript}");
                }
            }
            // End sample

            // Flatten every alternative's transcript and verify the expected phrase appears.
            IEnumerable<string> allTranscripts =
                from transcription in firstResult.SpeechTranscriptions
                from alternative in transcription.Alternatives
                select alternative.Transcript;

            Assert.Contains(allTranscripts, t => t.Contains("Paris is special because"));
        }
        public async Task AnnotateVideoAsync()
        {
            // Snippet: AnnotateVideoAsync(string,IEnumerable<Feature>,ByteString,VideoContext,string,string,CallSettings)
            // Additional: AnnotateVideoAsync(string,IEnumerable<Feature>,ByteString,VideoContext,string,string,CancellationToken)
            // Create client
            VideoIntelligenceServiceClient client = await VideoIntelligenceServiceClient.CreateAsync();

            // Initialize request argument(s)
            string inputUri = "";
            IEnumerable<Feature> features = new List<Feature>();
            ByteString inputContent = ByteString.CopyFromUtf8("");
            VideoContext videoContext = new VideoContext();
            string outputUri = "";
            string locationId = "";
            // Make the request
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> operation =
                await client.AnnotateVideoAsync(inputUri, features, inputContent, videoContext, outputUri, locationId);

            // Poll until the returned long-running operation is complete
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> completedOperation =
                await operation.PollUntilCompletedAsync();

            // Retrieve the operation result
            AnnotateVideoResponse result = completedOperation.Result;

            // Or get the name of the operation
            string operationName = operation.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation<AnnotateVideoResponse, AnnotateVideoProgress> retrievedOperation =
                await client.PollOnceAnnotateVideoAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedOperation.IsCompleted)
            {
                // If it has completed, then access the result
                AnnotateVideoResponse retrievedResult = retrievedOperation.Result;
            }
            // End snippet
        }
コード例 #27
0
    /// <summary>
    /// Page load handler: runs label detection on a sample GCS video and
    /// writes the detected labels, categories, and segment timings to the console.
    /// Relies on Application Default Credentials (GOOGLE_APPLICATION_CREDENTIALS).
    /// </summary>
    protected void Page_Load(object sender, EventArgs e)
    {
        try
        {
            // Fails fast here if Application Default Credentials are not configured.
            GoogleCredential.GetApplicationDefault();

            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest()
            {
                InputUri = @"gs://cloud-samples-data/video/cat.mp4",
                Features = { Feature.LabelDetection }
            };
            // Start the long-running annotation and block until it completes.
            var op = client.AnnotateVideo(request).PollUntilCompleted();
            foreach (var result in op.Result.AnnotationResults)
            {
                foreach (var annotation in result.SegmentLabelAnnotations)
                {
                    // Print the values instead of assigning them to dead locals.
                    Console.WriteLine($"Video label: {annotation.Entity.Description}");
                    foreach (var entity in annotation.CategoryEntities)
                    {
                        Console.WriteLine($"Video label category: {entity.Description}");
                    }
                    foreach (var segment in annotation.Segments)
                    {
                        Console.Write("Segment location: ");
                        Console.Write(segment.Segment.StartTimeOffset);
                        Console.Write(":");
                        Console.WriteLine(segment.Segment.EndTimeOffset);
                        Console.WriteLine($"Confidence: {segment.Confidence}");
                    }
                }
            }
        }
        catch (Exception ex)
        {
            // Do not swallow the failure silently. The most common cause is a
            // misconfigured credentials path, so include it in the diagnostics.
            string credentialsPath = Environment.GetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS");
            System.Diagnostics.Trace.TraceError(
                "Video annotation failed (GOOGLE_APPLICATION_CREDENTIALS={0}): {1}",
                credentialsPath, ex);
        }
    }
コード例 #28
0
        // [END video_detect_logo]

        // [START video_detect_logo_gcs]
        /// <summary>
        /// Runs logo recognition on a video stored in Google Cloud Storage and
        /// prints each recognized logo entity, its tracks (with per-frame bounding
        /// boxes and attributes), and the video segments where it appears.
        /// </summary>
        /// <param name="gcsUri">GCS URI of the video, e.g. "gs://bucket/video.mp4".</param>
        /// <returns>0 on completion.</returns>
        public static object DetectLogoGcs(string gcsUri)
        {
            var client  = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest()
            {
                InputUri = gcsUri,
                Features = { Feature.LogoRecognition }
            };

            Console.WriteLine("\nWaiting for operation to complete...");
            var op = client.AnnotateVideo(request).PollUntilCompleted();

            // The first result is retrieved because a single video was processed.
            var annotationResults = op.Result.AnnotationResults[0];

            // Annotations for list of logos detected, tracked and recognized in video.
            foreach (var logoRecognitionAnnotation in annotationResults.LogoRecognitionAnnotations)
            {
                var entity = logoRecognitionAnnotation.Entity;
                // Opaque entity ID. Some IDs may be available in
                // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
                Console.WriteLine($"Entity ID :{entity.EntityId}");
                Console.WriteLine($"Description :{entity.Description}");

                // All logo tracks where the recognized logo appears. Each track corresponds to one logo
                // instance appearing in consecutive frames.
                foreach (var track in logoRecognitionAnnotation.Tracks)
                {
                    // Video segment of a track.
                    var startTimeOffset = track.Segment.StartTimeOffset;
                    Console.WriteLine(
                        $"Start Time Offset: {startTimeOffset.Seconds}.{startTimeOffset.Nanos}");
                    var endTimeOffset = track.Segment.EndTimeOffset;
                    // Fixed: the fractional part previously printed Seconds twice
                    // instead of Seconds.Nanos.
                    Console.WriteLine(
                        $"End Time Offset: {endTimeOffset.Seconds}.{endTimeOffset.Nanos}");
                    Console.WriteLine($"\tConfidence: {track.Confidence}");

                    // The object with timestamp and attributes per frame in the track.
                    foreach (var timestampedObject in track.TimestampedObjects)
                    {
                        // Normalized Bounding box in a frame, where the object is located.
                        var normalizedBoundingBox = timestampedObject.NormalizedBoundingBox;
                        Console.WriteLine($"Left: {normalizedBoundingBox.Left}");
                        Console.WriteLine($"Top: {normalizedBoundingBox.Top}");
                        Console.WriteLine($"Right: {normalizedBoundingBox.Right}");
                        Console.WriteLine($"Bottom: {normalizedBoundingBox.Bottom}");

                        // Optional. The attributes of the object in the bounding box.
                        foreach (var attribute in timestampedObject.Attributes)
                        {
                            Console.WriteLine($"Name: {attribute.Name}");
                            Console.WriteLine($"Confidence: {attribute.Confidence}");
                            Console.WriteLine($"Value: {attribute.Value}");
                        }
                    }

                    // Optional. Attributes in the track level. Hoisted out of the
                    // per-frame loop above: these are per-track values and were
                    // previously re-printed once for every timestamped object.
                    foreach (var trackAttribute in track.Attributes)
                    {
                        Console.WriteLine($"Name : {trackAttribute.Name}");
                        Console.WriteLine($"Confidence : {trackAttribute.Confidence}");
                        Console.WriteLine($"Value : {trackAttribute.Value}");
                    }
                }

                // All video segments where the recognized logo appears. There might be multiple instances
                // of the same logo class appearing in one VideoSegment.
                foreach (var segment in logoRecognitionAnnotation.Segments)
                {
                    Console.WriteLine(
                        $"Start Time Offset : {segment.StartTimeOffset.Seconds}.{segment.StartTimeOffset.Nanos}");
                    Console.WriteLine(
                        $"End Time Offset : {segment.EndTimeOffset.Seconds}.{segment.EndTimeOffset.Nanos}");
                }
            }
            return 0;
        }