Running the MiniCPM-V-2.6 Web Demo on an Apple M1 Pro Mac
Motivation
⚙ The MiniCPM 2.6 "little cannon" can now take video input, so I had to pull it down and run it locally. The main problem to solve is that version 2.6 binds flash_attn by default, and `pip install flash_attn` fails because it hard-depends on CUDA. Besides that, there is a "BFloat16 is not supported on MPS" error to fix.
Environment
- macOS version: 15.0 Beta (24A5279h) / 15.1 Beta (24B5009l)
- Chip: M1 Pro
- Repository: https://github.com/OpenBMB/MiniCPM-V.git
- Branch: main
- Code version: commit b0125d8a, yiranyyu <2606375857@qq.com>, 2024/8/9 10:25
- Python version: 3.9
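Before touching the demo, it is worth confirming that the installed torch build can see the MPS backend at all. A minimal check (my addition, not from the original setup):

```python
import torch

# Both should print True on Apple silicon with a torch build that has MPS support;
# if is_built() is False, the installed wheel was compiled without MPS entirely.
print("MPS built:    ", torch.backends.mps.is_built())
print("MPS available:", torch.backends.mps.is_available())
```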
Fixing the issues
Clone the repository and install the dependencies:

```bash
# Clone the repository
git clone https://github.com/OpenBMB/MiniCPM-V.git
# Install the dependencies in requirements.txt
pip install -r requirements.txt
# modelscope_studio has to be installed manually
pip install http://thunlp.oss-cn-qingdao.aliyuncs.com/multi_modal/never_delete/modelscope_studio-0.4.0.9-py3-none-any.whl
```

If decord gives you trouble during installation, see my LAVIS post. Then find web_demo_2.6.py in the repo root and run it. First add the `--device mps` argument and the `PYTORCH_ENABLE_MPS_FALLBACK=1` environment variable, i.e. launch it as:

```bash
PYTORCH_ENABLE_MPS_FALLBACK=1 python web_demo_2.6.py --device mps
```
The first run of web_demo_2.6.py fails with:

```
ImportError: This modeling file requires the following packages that were not found in your environment: flash_attn. Run `pip install flash_attn`
```

Patch the code directly:
```python
from typing import Union
from transformers.dynamic_module_utils import get_imports
from unittest.mock import patch

# fix the imports
def fixed_get_imports(filename: Union[str, os.PathLike]) -> list[str]:
    imports = get_imports(filename)
    if not torch.cuda.is_available() and "flash_attn" in imports:
        imports.remove("flash_attn")
    return imports
```

Then, at around line 79, change the model load to:
```python
with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
    model = model.to(device=device)
```

This works because, with trust_remote_code=True, transformers collects the remote modeling file's dependencies via get_imports; removing flash_attn from that list when CUDA is unavailable lets the checkpoint load and fall back to a non-flash attention implementation.
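For reference, the patch plus the load can be exercised on their own, outside the demo script. A minimal sketch under the same assumptions as the demo (weights already downloaded, `model_path` as below):

```python
import os
import torch
from typing import Union
from unittest.mock import patch
from transformers import AutoModel
from transformers.dynamic_module_utils import get_imports

def fixed_get_imports(filename: Union[str, os.PathLike]) -> list[str]:
    # Drop flash_attn from the remote modeling file's declared imports
    # when there is no CUDA around to satisfy it.
    imports = get_imports(filename)
    if not torch.cuda.is_available() and "flash_attn" in imports:
        imports.remove("flash_attn")
    return imports

model_path = "openbmb/MiniCPM-V-2_6"
with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True,
                                      torch_dtype=torch.bfloat16)
model = model.to(device="mps").eval()
print(next(model.parameters()).device)  # expect: mps:0
```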
The complete web_demo_2.6.py after the change:

```python
#!/usr/bin/env python
# encoding: utf-8
import torch
import argparse
from transformers import AutoModel, AutoTokenizer
import gradio as gr
from PIL import Image
from decord import VideoReader, cpu
import io
import os
import copy
import requests
import base64
import json
import traceback
import re
import modelscope_studio as mgr
from typing import Union
from transformers.dynamic_module_utils import get_imports
from unittest.mock import patch

# README, How to run demo on different devices

# For Nvidia GPUs.
# python web_demo_2.6.py --device cuda

# For Mac with MPS (Apple silicon or AMD GPUs).
# PYTORCH_ENABLE_MPS_FALLBACK=1 python web_demo_2.6.py --device mps

# Argparser
parser = argparse.ArgumentParser(description='demo')
parser.add_argument('--device', type=str, default='cuda', help='cuda or mps')
parser.add_argument('--multi-gpus', action='store_true', default=False, help='use multi-gpus')
args = parser.parse_args()
device = args.device
assert device in ['cuda', 'mps']

# fix the imports
def fixed_get_imports(filename: Union[str, os.PathLike]) -> list[str]:
    imports = get_imports(filename)
    if not torch.cuda.is_available() and "flash_attn" in imports:
        imports.remove("flash_attn")
    return imports

# Load model
model_path = 'openbmb/MiniCPM-V-2_6'
if 'int4' in model_path:
    if device == 'mps':
        print('Error: running int4 model with bitsandbytes on Mac is not supported right now.')
        exit()
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
else:
    if args.multi_gpus:
        from accelerate import load_checkpoint_and_dispatch, init_empty_weights, infer_auto_device_map
        with init_empty_weights():
            model = AutoModel.from_pretrained(model_path, trust_remote_code=True,
                                              attn_implementation='sdpa', torch_dtype=torch.bfloat16)
        device_map = infer_auto_device_map(model, max_memory={0: "10GB", 1: "10GB"},
                                           no_split_module_classes=['SiglipVisionTransformer', 'Qwen2DecoderLayer'])
        device_id = device_map["llm.model.embed_tokens"]
        device_map["llm.lm_head"] = device_id  # first and last layer should be in same device
        device_map["vpm"] = device_id
        device_map["resampler"] = device_id
        device_id2 = device_map["llm.model.layers.26"]
        device_map["llm.model.layers.8"] = device_id2
        device_map["llm.model.layers.9"] = device_id2
        device_map["llm.model.layers.10"] = device_id2
        device_map["llm.model.layers.11"] = device_id2
        device_map["llm.model.layers.12"] = device_id2
        device_map["llm.model.layers.13"] = device_id2
        device_map["llm.model.layers.14"] = device_id2
        device_map["llm.model.layers.15"] = device_id2
        device_map["llm.model.layers.16"] = device_id2
        #print(device_map)
        model = load_checkpoint_and_dispatch(model, model_path, dtype=torch.bfloat16, device_map=device_map)
    else:
        with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
            model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
        model = model.to(device=device)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model.eval()

ERROR_MSG = "Error, please retry"
model_name = 'MiniCPM-V 2.6'
MAX_NUM_FRAMES = 64
IMAGE_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp'}
VIDEO_EXTENSIONS = {'.mp4', '.mkv', '.mov', '.avi', '.flv', '.wmv', '.webm', '.m4v'}

def get_file_extension(filename):
    return os.path.splitext(filename)[1].lower()

def is_image(filename):
    return get_file_extension(filename) in IMAGE_EXTENSIONS

def is_video(filename):
    return get_file_extension(filename) in VIDEO_EXTENSIONS

form_radio = {
    'choices': ['Beam Search', 'Sampling'],
    #'value': 'Beam Search',
    'value': 'Sampling',
    'interactive': True,
    'label': 'Decode Type'
}

def create_component(params, comp='Slider'):
    if comp == 'Slider':
        return gr.Slider(
            minimum=params['minimum'],
            maximum=params['maximum'],
            value=params['value'],
            step=params['step'],
            interactive=params['interactive'],
            label=params['label']
        )
    elif comp == 'Radio':
        return gr.Radio(
            choices=params['choices'],
            value=params['value'],
            interactive=params['interactive'],
            label=params['label']
        )
    elif comp == 'Button':
        return gr.Button(
            value=params['value'],
            interactive=True
        )

def create_multimodal_input(upload_image_disabled=False, upload_video_disabled=False):
    return mgr.MultimodalInput(
        upload_image_button_props={'label': 'Upload Image', 'disabled': upload_image_disabled, 'file_count': 'multiple'},
        upload_video_button_props={'label': 'Upload Video', 'disabled': upload_video_disabled, 'file_count': 'single'},
        submit_button_props={'label': 'Submit'}
    )

def chat(img, msgs, ctx, params=None, vision_hidden_states=None):
    try:
        print('msgs:', msgs)
        answer = model.chat(
            image=None,
            msgs=msgs,
            tokenizer=tokenizer,
            **params
        )
        res = re.sub(r'(<box>.*</box>)', '', answer)
        res = res.replace('<ref>', '')
        res = res.replace('</ref>', '')
        res = res.replace('<box>', '')
        answer = res.replace('</box>', '')
        print('answer:', answer)
        return 0, answer, None, None
    except Exception as e:
        print(e)
        traceback.print_exc()
        return -1, ERROR_MSG, None, None

def encode_image(image):
    if not isinstance(image, Image.Image):
        if hasattr(image, 'path'):
            image = Image.open(image.path).convert("RGB")
        else:
            image = Image.open(image.file.path).convert("RGB")
    # resize to max_size
    max_size = 448 * 16
    if max(image.size) > max_size:
        w, h = image.size
        if w > h:
            new_w = max_size
            new_h = int(h * max_size / w)
        else:
            new_h = max_size
            new_w = int(w * max_size / h)
        image = image.resize((new_w, new_h), resample=Image.BICUBIC)
    return image
    ## save by BytesIO and convert to base64
    #buffered = io.BytesIO()
    #image.save(buffered, format="png")
    #im_b64 = base64.b64encode(buffered.getvalue()).decode()
    #return {"type": "image", "pairs": im_b64}

def encode_video(video):
    def uniform_sample(l, n):
        gap = len(l) / n
        idxs = [int(i * gap + gap / 2) for i in range(n)]
        return [l[i] for i in idxs]

    if hasattr(video, 'path'):
        vr = VideoReader(video.path, ctx=cpu(0))
    else:
        vr = VideoReader(video.file.path, ctx=cpu(0))
    sample_fps = round(vr.get_avg_fps() / 1)  # FPS
    frame_idx = [i for i in range(0, len(vr), sample_fps)]
    if len(frame_idx) > MAX_NUM_FRAMES:
        frame_idx = uniform_sample(frame_idx, MAX_NUM_FRAMES)
    video = vr.get_batch(frame_idx).asnumpy()
    video = [Image.fromarray(v.astype('uint8')) for v in video]
    video = [encode_image(v) for v in video]
    print('video frames:', len(video))
    return video

def check_mm_type(mm_file):
    if hasattr(mm_file, 'path'):
        path = mm_file.path
    else:
        path = mm_file.file.path
    if is_image(path):
        return "image"
    if is_video(path):
        return "video"
    return None

def encode_mm_file(mm_file):
    if check_mm_type(mm_file) == 'image':
        return [encode_image(mm_file)]
    if check_mm_type(mm_file) == 'video':
        return encode_video(mm_file)
    return None

def make_text(text):
    #return {"type": "text", "pairs": text}  # For remote call
    return text

def encode_message(_question):
    files = _question.files
    question = _question.text
    pattern = r"\[mm_media\]\d+\[/mm_media\]"
    matches = re.split(pattern, question)
    message = []
    if len(matches) != len(files) + 1:
        gr.Warning("Number of Images not match the placeholder in text, please refresh the page to restart!")
    assert len(matches) == len(files) + 1
    text = matches[0].strip()
    if text:
        message.append(make_text(text))
    for i in range(len(files)):
        message += encode_mm_file(files[i])
        text = matches[i + 1].strip()
        if text:
            message.append(make_text(text))
    return message

def check_has_videos(_question):
    images_cnt = 0
    videos_cnt = 0
    for file in _question.files:
        if check_mm_type(file) == "image":
            images_cnt += 1
        else:
            videos_cnt += 1
    return images_cnt, videos_cnt

def count_video_frames(_context):
    num_frames = 0
    for message in _context:
        for item in message["content"]:
            #if item["type"] == "image":  # For remote call
            if isinstance(item, Image.Image):
                num_frames += 1
    return num_frames

def respond(_question, _chat_bot, _app_cfg, params_form):
    _context = _app_cfg['ctx'].copy()
    _context.append({'role': 'user', 'content': encode_message(_question)})

    images_cnt = _app_cfg['images_cnt']
    videos_cnt = _app_cfg['videos_cnt']
    files_cnts = check_has_videos(_question)
    if files_cnts[1] + videos_cnt > 1 or (files_cnts[1] + videos_cnt == 1 and files_cnts[0] + images_cnt > 0):
        gr.Warning("Only supports single video file input right now!")
        return _question, _chat_bot, _app_cfg

    if params_form == 'Beam Search':
        params = {
            'sampling': False,
            'num_beams': 3,
            'repetition_penalty': 1.2,
            'max_new_tokens': 2048
        }
    else:
        params = {
            'sampling': True,
            'top_p': 0.8,
            'top_k': 100,
            'temperature': 0.7,
            'repetition_penalty': 1.05,
            'max_new_tokens': 2048
        }

    if files_cnts[1] + videos_cnt > 0:
        params["max_inp_length"] = 4352  # 4096+256
        params["use_image_id"] = False
        params["max_slice_nums"] = 1 if count_video_frames(_context) > 16 else 2

    code, _answer, _, sts = chat("", _context, None, params)

    images_cnt += files_cnts[0]
    videos_cnt += files_cnts[1]
    _context.append({"role": "assistant", "content": [make_text(_answer)]})
    _chat_bot.append((_question, _answer))
    if code == 0:
        _app_cfg['ctx'] = _context
        _app_cfg['sts'] = sts
        _app_cfg['images_cnt'] = images_cnt
        _app_cfg['videos_cnt'] = videos_cnt

    upload_image_disabled = videos_cnt > 0
    upload_video_disabled = videos_cnt > 0 or images_cnt > 0
    return create_multimodal_input(upload_image_disabled, upload_video_disabled), _chat_bot, _app_cfg

def fewshot_add_demonstration(_image, _user_message, _assistant_message, _chat_bot, _app_cfg):
    ctx = _app_cfg["ctx"]
    message_item = []
    if _image is not None:
        image = Image.open(_image).convert("RGB")
        ctx.append({"role": "user", "content": [encode_image(image), make_text(_user_message)]})
        message_item.append({"text": "[mm_media]1[/mm_media]\n" + _user_message, "files": [_image]})
    else:
        if _user_message:
            ctx.append({"role": "user", "content": [make_text(_user_message)]})
            message_item.append({"text": _user_message, "files": []})
        else:
            message_item.append(None)
    if _assistant_message:
        ctx.append({"role": "assistant", "content": [make_text(_assistant_message)]})
        message_item.append({"text": _assistant_message, "files": []})
    else:
        message_item.append(None)

    _chat_bot.append(message_item)
    return None, "", "", _chat_bot, _app_cfg

def fewshot_respond(_image, _user_message, _chat_bot, _app_cfg, params_form):
    user_message_contents = []
    _context = _app_cfg["ctx"].copy()
    if _image:
        image = Image.open(_image).convert("RGB")
        user_message_contents += [encode_image(image)]
    if _user_message:
        user_message_contents += [make_text(_user_message)]
    if user_message_contents:
        _context.append({"role": "user", "content": user_message_contents})

    if params_form == 'Beam Search':
        params = {
            'sampling': False,
            'num_beams': 3,
            'repetition_penalty': 1.2,
            'max_new_tokens': 2048
        }
    else:
        params = {
            'sampling': True,
            'top_p': 0.8,
            'top_k': 100,
            'temperature': 0.7,
            'repetition_penalty': 1.05,
            'max_new_tokens': 2048
        }

    code, _answer, _, sts = chat("", _context, None, params)

    _context.append({"role": "assistant", "content": [make_text(_answer)]})

    if _image:
        _chat_bot.append([
            {"text": "[mm_media]1[/mm_media]\n" + _user_message, "files": [_image]},
            {"text": _answer, "files": []}
        ])
    else:
        _chat_bot.append([
            {"text": _user_message, "files": [_image]},
            {"text": _answer, "files": []}
        ])
    if code == 0:
        _app_cfg['ctx'] = _context
        _app_cfg['sts'] = sts
    return None, '', '', _chat_bot, _app_cfg

def regenerate_button_clicked(_question, _image, _user_message, _assistant_message, _chat_bot, _app_cfg, params_form):
    if len(_chat_bot) <= 1 or not _chat_bot[-1][1]:
        gr.Warning('No question for regeneration.')
        return '', _image, _user_message, _assistant_message, _chat_bot, _app_cfg
    if _app_cfg["chat_type"] == "Chat":
        images_cnt = _app_cfg['images_cnt']
        videos_cnt = _app_cfg['videos_cnt']
        _question = _chat_bot[-1][0]
        _chat_bot = _chat_bot[:-1]
        _app_cfg['ctx'] = _app_cfg['ctx'][:-2]
        files_cnts = check_has_videos(_question)
        images_cnt -= files_cnts[0]
        videos_cnt -= files_cnts[1]
        _app_cfg['images_cnt'] = images_cnt
        _app_cfg['videos_cnt'] = videos_cnt

        upload_image_disabled = videos_cnt > 0
        upload_video_disabled = videos_cnt > 0 or images_cnt > 0
        _question, _chat_bot, _app_cfg = respond(_question, _chat_bot, _app_cfg, params_form)
        return _question, _image, _user_message, _assistant_message, _chat_bot, _app_cfg
    else:
        last_message = _chat_bot[-1][0]
        last_image = None
        last_user_message = ''
        if last_message.text:
            last_user_message = last_message.text
        if last_message.files:
            last_image = last_message.files[0].file.path
        _chat_bot = _chat_bot[:-1]
        _app_cfg['ctx'] = _app_cfg['ctx'][:-2]
        _image, _user_message, _assistant_message, _chat_bot, _app_cfg = fewshot_respond(last_image, last_user_message, _chat_bot, _app_cfg, params_form)
        return _question, _image, _user_message, _assistant_message, _chat_bot, _app_cfg

def flushed():
    return gr.update(interactive=True)

def clear(txt_message, chat_bot, app_session):
    txt_message.files.clear()
    txt_message.text = ''
    chat_bot = copy.deepcopy(init_conversation)
    app_session['sts'] = None
    app_session['ctx'] = []
    app_session['images_cnt'] = 0
    app_session['videos_cnt'] = 0
    return create_multimodal_input(), chat_bot, app_session, None, '', ''

def select_chat_type(_tab, _app_cfg):
    _app_cfg["chat_type"] = _tab
    return _app_cfg

init_conversation = [
    [
        None,
        {
            # The first message of bot closes the typewriter.
            "text": "You can talk to me now",
            "flushing": False
        }
    ],
]

css = """
video { height: auto !important; }
.example label { font-size: 16px;}
"""
introduction = """
## Features:
1. Chat with single image
2. Chat with multiple images
3. Chat with video
4. In-context few-shot learning

Click `How to use` tab to see examples.
"""
with gr.Blocks(css=css) as demo:
    with gr.Tab(model_name):
        with gr.Row():
            with gr.Column(scale=1, min_width=300):
                gr.Markdown(value=introduction)
                params_form = create_component(form_radio, comp='Radio')
                regenerate = create_component({'value': 'Regenerate'}, comp='Button')
                clear_button = create_component({'value': 'Clear History'}, comp='Button')

            with gr.Column(scale=3, min_width=500):
                app_session = gr.State({'sts': None, 'ctx': [], 'images_cnt': 0, 'videos_cnt': 0, 'chat_type': 'Chat'})
                chat_bot = mgr.Chatbot(label=f"Chat with {model_name}", value=copy.deepcopy(init_conversation), height=600, flushing=False, bubble_full_width=False)

                with gr.Tab("Chat") as chat_tab:
                    txt_message = create_multimodal_input()
                    chat_tab_label = gr.Textbox(value="Chat", interactive=False, visible=False)

                    txt_message.submit(
                        respond,
                        [txt_message, chat_bot, app_session, params_form],
                        [txt_message, chat_bot, app_session]
                    )

                with gr.Tab("Few Shot") as fewshot_tab:
                    fewshot_tab_label = gr.Textbox(value="Few Shot", interactive=False, visible=False)
                    with gr.Row():
                        with gr.Column(scale=1):
                            image_input = gr.Image(type="filepath", sources=["upload"])
                        with gr.Column(scale=3):
                            user_message = gr.Textbox(label="User")
                            assistant_message = gr.Textbox(label="Assistant")
                            with gr.Row():
                                add_demonstration_button = gr.Button("Add Example")
                                generate_button = gr.Button(value="Generate", variant="primary")

                    add_demonstration_button.click(
                        fewshot_add_demonstration,
                        [image_input, user_message, assistant_message, chat_bot, app_session],
                        [image_input, user_message, assistant_message, chat_bot, app_session]
                    )
                    generate_button.click(
                        fewshot_respond,
                        [image_input, user_message, chat_bot, app_session, params_form],
                        [image_input, user_message, assistant_message, chat_bot, app_session]
                    )

                chat_tab.select(
                    select_chat_type,
                    [chat_tab_label, app_session],
                    [app_session]
                )
                chat_tab.select(  # do clear
                    clear,
                    [txt_message, chat_bot, app_session],
                    [txt_message, chat_bot, app_session, image_input, user_message, assistant_message]
                )
                fewshot_tab.select(
                    select_chat_type,
                    [fewshot_tab_label, app_session],
                    [app_session]
                )
                fewshot_tab.select(  # do clear
                    clear,
                    [txt_message, chat_bot, app_session],
                    [txt_message, chat_bot, app_session, image_input, user_message, assistant_message]
                )
                chat_bot.flushed(
                    flushed,
                    outputs=[txt_message]
                )
                regenerate.click(
                    regenerate_button_clicked,
                    [txt_message, image_input, user_message, assistant_message, chat_bot, app_session, params_form],
                    [txt_message, image_input, user_message, assistant_message, chat_bot, app_session]
                )
                clear_button.click(
                    clear,
                    [txt_message, chat_bot, app_session],
                    [txt_message, chat_bot, app_session, image_input, user_message, assistant_message]
                )

    with gr.Tab("How to use"):
        with gr.Column():
            with gr.Row():
                image_example = gr.Image(value="http://thunlp.oss-cn-qingdao.aliyuncs.com/multi_modal/never_delete/m_bear2.gif", label='1. Chat with single or multiple images', interactive=False, width=400, elem_classes="example")
                example2 = gr.Image(value="http://thunlp.oss-cn-qingdao.aliyuncs.com/multi_modal/never_delete/video2.gif", label='2. Chat with video', interactive=False, width=400, elem_classes="example")
                example3 = gr.Image(value="http://thunlp.oss-cn-qingdao.aliyuncs.com/multi_modal/never_delete/fshot.gif", label='3. Few shot', interactive=False, width=400, elem_classes="example")

# launch
demo.launch(share=False, debug=True, show_api=False, server_port=8885, server_name="0.0.0.0")
```
Running web_demo_2.6.py again then fails with a second error:

```
File "/Usxxxxxxxckages/torch/nn/modules/module.py", line 1158, in convert
    return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
TypeError: BFloat16 is not supported on MPS
```

Reinstall the torch packages from the nightly channel:

```bash
pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```

After that, it runs without errors.
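To verify the reinstall actually fixed it before relaunching the demo, the failure can be reproduced in isolation (a quick diagnostic of mine, not from the original post):

```python
import torch

# On the broken build this raises "TypeError: BFloat16 is not supported on MPS";
# on the nightly build it should print the dtype and device instead.
try:
    t = torch.zeros(1, dtype=torch.bfloat16, device="mps")
    print("bfloat16 on MPS works:", t.dtype, t.device)
except (TypeError, RuntimeError) as e:
    print("still broken:", e)
```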
Downloading the model (about 20 GB) can take a while; I eventually pulled it through a proxy. As long as the network is running flat out, it is working.
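If the in-process download keeps stalling, one option (my suggestion, not part of the original workflow) is to pre-fetch the weights into the local cache with huggingface_hub; `from_pretrained` will then find them without re-downloading:

```python
from huggingface_hub import snapshot_download

# Pull all shards of openbmb/MiniCPM-V-2_6 (~20 GB) into the local HF cache.
# Set the HF_ENDPOINT environment variable first if you need a mirror.
local_dir = snapshot_download("openbmb/MiniCPM-V-2_6")
print("cached at:", local_dir)
```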
A successful run prints:
```
Loading checkpoint shards: 100%|██████████| 4/4 [00:21<00:00,  5.33s/it]
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Running on local URL:  http://0.0.0.0:8885

To create a public link, set `share=True` in `launch()`.
IMPORTANT: You are using gradio version 4.22.0, however version 4.29.0 is available, please upgrade.
--------
```

Results

Image understanding

(screenshots: Sampling decoding vs. Beam Search decoding)

Video understanding

(screenshots: Sampling decoding vs. Beam Search decoding)

System usage

(screenshot)

Summary
Fixed the forced flash_attn dependency and the bfloat16-on-MPS failure. Judging from the system usage, part of the work did not go through MPS, which the PYTORCH_ENABLE_MPS_FALLBACK=1 environment variable we had to add also suggests (unsupported ops fall back to the CPU). Sampling gives wild answers, while Beam Search answers surprisingly well; with Beam Search, a 4-second video takes around 230 s on the M1 Pro with the current code. Ollama deployment is still under investigation…
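To check for yourself how much of the model actually sits on MPS, a small probe can be run after the model loads (my sketch, assuming the `model` variable from the demo and torch ≥ 2.0):

```python
import torch

# Where do the weights live, and how much memory does the MPS allocator hold?
devices = {p.device.type for p in model.parameters()}
print("parameter devices:", devices)  # expect: {'mps'}
print("MPS allocated: %.1f GiB" % (torch.mps.current_allocated_memory() / 2**30))
```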
Closing thoughts