import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image = UIImage(systemName: "bicycle") else { fatalError() }

// Provide a text prompt to include with the image
let prompt = "What's in this picture?"

// To generate text output, call generateContent and pass in the prompt
let response = try await model.generateContent(image, prompt)
print(response.text ?? "No text in response.")
Multi-file input
import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image1 = UIImage(systemName: "car") else { fatalError() }
guard let image2 = UIImage(systemName: "car.2") else { fatalError() }

// Provide a text prompt to include with the images
let prompt = "What's different between these pictures?"

// To generate text output, call generateContent and pass in the prompt
let response = try await model.generateContent(image1, image2, prompt)
print(response.text ?? "No text in response.")
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads an image from the app/res/drawable/ directory
val bitmap: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)

// Provide a prompt that includes the image specified above and text
val prompt = content {
  image(bitmap)
  text("What developer tool is this mascot from?")
}

// To generate text output, call generateContent with the prompt
val response = generativeModel.generateContent(prompt)
print(response.text)
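Multi-file input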
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads images from the app/res/drawable/ directory
val bitmap1: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)
val bitmap2: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky_eats_pizza)

// Provide a prompt that includes the images specified above and text
val prompt = content {
  image(bitmap1)
  image(bitmap2)
  text("What is different between these pictures?")
}

// To generate text output, call generateContent with the prompt
val response = generativeModel.generateContent(prompt)
print(response.text)
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);

// Provide a prompt that includes the image specified above and text
Content content = new Content.Builder()
        .addImage(bitmap)
        .addText("What developer tool is this mascot from?")
        .build();

// To generate text output, call generateContent with the prompt
ListenableFuture<GenerateContentResponse> response = model.generateContent(content);
Futures.addCallback(response, new FutureCallback<GenerateContentResponse>() {
    @Override
    public void onSuccess(GenerateContentResponse result) {
        String resultText = result.getText();
        System.out.println(resultText);
    }

    @Override
    public void onFailure(Throwable t) {
        t.printStackTrace();
    }
}, executor);
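Note that the Java samples here and below pass an `executor` that the snippets leave undefined; your app is expected to supply one. A minimal sketch, assuming a plain background executor is acceptable for the callback (on Android you might instead use `ContextCompat.getMainExecutor(context)` when the callback must touch the UI):

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

// Hypothetical setup: a single-threaded background executor
// for delivering the Futures/Publisher callbacks.
Executor executor = Executors.newSingleThreadExecutor();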
Multi-file input
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap1 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);
Bitmap bitmap2 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky_eats_pizza);

// Provide a prompt that includes the images specified above and text
Content prompt = new Content.Builder()
        .addImage(bitmap1)
        .addImage(bitmap2)
        .addText("What's different between these pictures?")
        .build();

// To generate text output, call generateContent with the prompt
ListenableFuture<GenerateContentResponse> response = model.generateContent(prompt);
Futures.addCallback(response, new FutureCallback<GenerateContentResponse>() {
    @Override
    public void onSuccess(GenerateContentResponse result) {
        String resultText = result.getText();
        System.out.println(resultText);
    }

    @Override
    public void onFailure(Throwable t) {
        t.printStackTrace();
    }
}, executor);
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the image
  const prompt = "What do you see?";

  // Prepare image for input
  const fileInputEl = document.querySelector("input[type=file]");
  const imagePart = await fileToGenerativePart(fileInputEl.files[0]);

  // To generate text output, call generateContent with the text and image
  const result = await model.generateContent([prompt, imagePart]);

  const response = result.response;
  const text = response.text();
  console.log(text);
}

run();
Multi-file input
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the images
  const prompt = "What's different between these pictures?";

  // Prepare images for input
  const fileInputEl = document.querySelector("input[type=file]");
  const imageParts = await Promise.all(
    [...fileInputEl.files].map(fileToGenerativePart)
  );

  // To generate text output, call generateContent with the text and images
  const result = await model.generateContent([prompt, ...imageParts]);

  const response = result.response;
  const text = response.text();
  console.log(text);
}

run();
import 'dart:io';

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

// Provide a text prompt to include with the image
final prompt = TextPart("What's in the picture?");

// Prepare image for input
final image = await File('image0.jpg').readAsBytes();
final imagePart = InlineDataPart('image/jpeg', image);

// To generate text output, call generateContent with the text and image
final response = await model.generateContent([
  Content.multi([prompt, imagePart])
]);
print(response.text);
Multi-file input
import 'dart:io';

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

final (firstImage, secondImage) = await (
  File('image0.jpg').readAsBytes(),
  File('image1.jpg').readAsBytes()
).wait;

// Provide a text prompt to include with the images
final prompt = TextPart("What's different between these pictures?");

// Prepare images for input
final imageParts = [
  InlineDataPart('image/jpeg', firstImage),
  InlineDataPart('image/jpeg', secondImage),
];

// To generate text output, call generateContent with the text and images
final response = await model.generateContent([
  Content.multi([prompt, ...imageParts])
]);
print(response.text);
import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image = UIImage(systemName: "bicycle") else { fatalError() }

// Provide a text prompt to include with the image
let prompt = "What's in this picture?"

// To stream generated text output, call generateContentStream and pass in the prompt
let contentStream = try model.generateContentStream(image, prompt)
for try await chunk in contentStream {
  if let text = chunk.text {
    print(text)
  }
}
Multi-file input
import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image1 = UIImage(systemName: "car") else { fatalError() }
guard let image2 = UIImage(systemName: "car.2") else { fatalError() }

// Provide a text prompt to include with the images
let prompt = "What's different between these pictures?"

// To stream generated text output, call generateContentStream and pass in the prompt
let contentStream = try model.generateContentStream(image1, image2, prompt)
for try await chunk in contentStream {
  if let text = chunk.text {
    print(text)
  }
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads an image from the app/res/drawable/ directory
val bitmap: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)

// Provide a prompt that includes the image specified above and text
val prompt = content {
  image(bitmap)
  text("What developer tool is this mascot from?")
}

// To stream generated text output, call generateContentStream with the prompt
var fullResponse = ""
generativeModel.generateContentStream(prompt).collect { chunk ->
  print(chunk.text)
  fullResponse += chunk.text
}
Multi-file input
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads images from the app/res/drawable/ directory
val bitmap1: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)
val bitmap2: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky_eats_pizza)

// Provide a prompt that includes the images specified above and text
val prompt = content {
  image(bitmap1)
  image(bitmap2)
  text("What's different between these pictures?")
}

// To stream generated text output, call generateContentStream with the prompt
var fullResponse = ""
generativeModel.generateContentStream(prompt).collect { chunk ->
  print(chunk.text)
  fullResponse += chunk.text
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);

// Provide a prompt that includes the image specified above and text
Content prompt = new Content.Builder()
        .addImage(bitmap)
        .addText("What developer tool is this mascot from?")
        .build();

// To stream generated text output, call generateContentStream with the prompt
Publisher<GenerateContentResponse> streamingResponse = model.generateContentStream(prompt);

final String[] fullResponse = {""};

streamingResponse.subscribe(new Subscriber<GenerateContentResponse>() {
    @Override
    public void onNext(GenerateContentResponse generateContentResponse) {
        String chunk = generateContentResponse.getText();
        fullResponse[0] += chunk;
    }

    @Override
    public void onComplete() {
        System.out.println(fullResponse[0]);
    }

    @Override
    public void onError(Throwable t) {
        t.printStackTrace();
    }

    @Override
    public void onSubscribe(Subscription s) {
    }
});
Multi-file input
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap1 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);
Bitmap bitmap2 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky_eats_pizza);

// Provide a prompt that includes the images specified above and text
Content prompt = new Content.Builder()
        .addImage(bitmap1)
        .addImage(bitmap2)
        .addText("What's different between these pictures?")
        .build();

// To stream generated text output, call generateContentStream with the prompt
Publisher<GenerateContentResponse> streamingResponse = model.generateContentStream(prompt);

final String[] fullResponse = {""};

streamingResponse.subscribe(new Subscriber<GenerateContentResponse>() {
    @Override
    public void onNext(GenerateContentResponse generateContentResponse) {
        String chunk = generateContentResponse.getText();
        fullResponse[0] += chunk;
    }

    @Override
    public void onComplete() {
        System.out.println(fullResponse[0]);
    }

    @Override
    public void onError(Throwable t) {
        t.printStackTrace();
    }

    @Override
    public void onSubscribe(Subscription s) {
    }
});
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the image
  const prompt = "What do you see?";

  // Prepare image for input
  const fileInputEl = document.querySelector("input[type=file]");
  const imagePart = await fileToGenerativePart(fileInputEl.files[0]);

  // To stream generated text output, call generateContentStream with the text and image
  const result = await model.generateContentStream([prompt, imagePart]);

  for await (const chunk of result.stream) {
    const chunkText = chunk.text();
    console.log(chunkText);
  }
}

run();
Multi-file input
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the images
  const prompt = "What's different between these pictures?";

  // Prepare images for input
  const fileInputEl = document.querySelector("input[type=file]");
  const imageParts = await Promise.all(
    [...fileInputEl.files].map(fileToGenerativePart)
  );

  // To stream generated text output, call generateContentStream with the text and images
  const result = await model.generateContentStream([prompt, ...imageParts]);

  for await (const chunk of result.stream) {
    const chunkText = chunk.text();
    console.log(chunkText);
  }
}

run();
import 'dart:io';

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

// Provide a text prompt to include with the image
final prompt = TextPart("What's in the picture?");

// Prepare image for input
final image = await File('image0.jpg').readAsBytes();
final imagePart = InlineDataPart('image/jpeg', image);

// To stream generated text output, call generateContentStream with the text and image
final response = model.generateContentStream([
  Content.multi([prompt, imagePart])
]);
await for (final chunk in response) {
  print(chunk.text);
}
Multi-file input
import 'dart:io';

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

final (firstImage, secondImage) = await (
  File('image0.jpg').readAsBytes(),
  File('image1.jpg').readAsBytes()
).wait;

// Provide a text prompt to include with the images
final prompt = TextPart("What's different between these pictures?");

// Prepare images for input
final imageParts = [
  InlineDataPart('image/jpeg', firstImage),
  InlineDataPart('image/jpeg', secondImage),
];

// To stream generated text output, call generateContentStream with the text and images
final response = model.generateContentStream([
  Content.multi([prompt, ...imageParts])
]);
await for (final chunk in response) {
  print(chunk.text);
}