CLIP(Contrastive Language-Image Pretraining)跨模态嵌入生成器,实现基于CLIP模型的图文联合嵌入空间映射
| 输入列名 | 说明 |
|---|---|
| content | 包含输入数据的数组,支持以下元素类型: - 文本模式: UTF-8字符串 - 图像模式: Base64字符串/二进制数据/图像URL |
输出说明:输出列为包含浮点数嵌入向量的数组,每个元素为 List[float]。
如参数没有默认值,则为必填参数
| 参数名称 | 类型 | 默认值 | 描述 |
|---|---|---|---|
| content_type | str | image_url | 输入图像的格式类型,支持: - 文本(text) - tos/http 地址(image_url) - base64 编码(image_base64) - 二进制流(image_binary) 可选值:["text", "image_url", "image_base64", "image_binary"] 默认值:"image_url" |
| batch_size | int | 16 | 批量计算数据量,默认:16 |
| model_path | str | /opt/las/models | 模型存储路径,默认:'/opt/las/models'(内部参数) |
| model_name | str | iic/multi-modal_clip-vit-base-patch16_zh | 模型名称,可选: - 'iic/multi-modal_clip-vit-base-patch16_zh' - 'iic/multi-modal_clip-vit-huge-patch14_zh' - 'iic/multi-modal_clip-vit-large-patch14_zh' - 'iic/multi-modal_clip-vit-large-patch14_336_zh' 默认:'iic/multi-modal_clip-vit-base-patch16_zh' |
| model_version | str | v1.0.1 | 模型版本,当前仅支持'v1.0.1' |
| rank | int | 0 | 指定GPU设备编号(多卡环境有效),默认:0(内部参数) |
下面的代码展示了如何使用 daft 运行图文 embedding 算子, 生成图文 embedding 。
from __future__ import annotations import os import daft from daft import col from daft.las.functions.multimodal.embedding.clip_embedding import ClipEmbedding from daft.las.functions.udf import las_udf if __name__ == "__main__": if os.getenv("DAFT_RUNNER", "native") == "ray": import logging import ray def configure_logging(): logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S.%s".format(), ) logging.getLogger("tracing.span").setLevel(logging.WARNING) logging.getLogger("daft_io.stats").setLevel(logging.WARNING) logging.getLogger("DaftStatisticsManager").setLevel(logging.WARNING) logging.getLogger("DaftFlotillaScheduler").setLevel(logging.WARNING) logging.getLogger("DaftFlotillaDispatcher").setLevel(logging.WARNING) ray.init(dashboard_host="0.0.0.0", runtime_env={"worker_process_setup_hook": configure_logging}) daft.context.set_runner_ray() daft.set_execution_config(actor_udf_ready_timeout=600) daft.set_execution_config(min_cpu_per_task=0) samples = {"text": ["皮卡丘", "小狗", "小猫", None]} content_type = "text" model_path = os.getenv("MODEL_PATH", "/opt/las/models") model_name = "iic/multi-modal_clip-vit-base-patch16_zh" model_version = "v1.0.1" embedding_col_name = "embedding" batch_size = 2 rank = 0 num_gpus = 1 ds = daft.from_pydict(samples) ds = ds.with_column( "embedding", las_udf( ClipEmbedding, construct_args={ "content_type": content_type, "model_path": model_path, "model_name": model_name, "model_version": model_version, "batch_size": batch_size, "rank": rank, }, num_gpus=num_gpus, batch_size=1, concurrency=1, )(col("text")), ) ds.show() # ╭────────┬────────────────────────────────╮ # │ text ┆ embedding │ # │ --- ┆ --- │ # │ Utf8 ┆ List[Float32] │ # ╞════════╪════════════════════════════════╡ # │ 皮卡丘 ┆ [0.12005615, -0.009140015, -0… │ # ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ # │ 小狗 ┆ [0.12963867, 0.00039935112, 0… │ # ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ # │ 小猫 ┆ 
[0.12670898, 0.015533447, 0.0… │ # ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤ # │ None ┆ None │ # ╰────────┴────────────────────────────────╯ tos_dir_url = os.getenv("TOS_DIR_URL", "las-cn-beijing-public-online.tos-cn-beijing.volces.com") samples = { "image_path": [ f"https://{tos_dir_url}/public/shared_image_dataset/cat_ip_adapter.jpeg" ] } ds = daft.from_pydict(samples) ds = ds.with_column( "embedding", las_udf( ClipEmbedding, construct_args={ "content_type": "image_url", "model_path": model_path, "model_name": model_name, "model_version": model_version, "batch_size": batch_size, "rank": rank, }, num_gpus=num_gpus, batch_size=1, )(col("image_path")), ) ds.show() # ╭────────────────────────────────┬────────────────────────────────╮ # │ image_path ┆ embedding │ # │ --- ┆ --- │ # │ Utf8 ┆ List[Float32] │ # ╞════════════════════════════════╪════════════════════════════════╡ # │ tos://las-cn-beijing-public-o… ┆ [0.04598999, -0.090148926, -0… │ # ╰────────────────────────────────┴────────────────────────────────╯