-
Notifications
You must be signed in to change notification settings - Fork 44
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
dockerfile参考,已跑通模型 #7
Comments
<?php
// Interactive CLI demo for ChatGLM3-6B: uses the phpy extension to call
// Python libraries (transformers / torch) directly from PHP.
define('BASE_PATH', __DIR__ . '/..');
require BASE_PATH . '/vendor/autoload.php';
require __DIR__ . '/utils.php'; // presumably provides load_model_on_gpus() used below — verify
use Dotenv\Dotenv;
use Dotenv\Repository\Adapter;
use Dotenv\Repository\RepositoryBuilder;
use function Laravel\Prompts\text;
use function Laravel\Prompts\info;
use function Laravel\Prompts\error;
// Populate the process environment from the project's .env file (see load_env()).
load_env();
// Import the required Python modules through phpy.
$os = PyCore::import('os'); // NOTE(review): imported but never used in this script
$platform = PyCore::import('platform'); // NOTE(review): imported but never used in this script
$transformers = PyCore::import('transformers');
$AutoModel = $transformers->AutoModel;
$AutoTokenizer = $transformers->AutoTokenizer;
$torch = PyCore::import('torch');
// Model/tokenizer locations are configurable via env vars; default to the hub id.
$MODEL_PATH = getenv('MODEL_PATH') ?: 'THUDM/chatglm3-6b';
$TOKENIZER_PATH = getenv("TOKENIZER_PATH") ?: $MODEL_PATH;
// Prefer the GPU whenever CUDA is available.
$DEVICE = $torch->cuda->is_available() ? 'cuda' : 'cpu';
// Load the tokenizer and model. trust_remote_code is required because the
// ChatGLM3 checkpoint ships its own custom modeling code.
$tokenizer = $AutoTokenizer->from_pretrained($TOKENIZER_PATH, trust_remote_code: true);
if ($DEVICE === 'cuda') {
    // CUDA path: shard the model across all visible GPUs.
    // load_model_on_gpus() comes from utils.php — TODO confirm its precision handling.
    // Single-GPU alternative:
    // $model = $AutoModel->from_pretrained($MODEL_PATH, trust_remote_code: true)->to($DEVICE)->eval();
    $model = load_model_on_gpus($MODEL_PATH, $torch->cuda->device_count());
} else {
    // CPU and other non-CUDA backends: cast to float32 via ->float(), since
    // half precision is generally unsupported on these devices.
    $model = $AutoModel->from_pretrained($MODEL_PATH, trust_remote_code: true)->float()->to($DEVICE)->eval();
}
// Interactive REPL: read a user prompt, stream the model's reply incrementally.
$welcome = '欢迎使用 ChatGLM3-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序';
// Conversation state carried across turns and fed back into stream_chat().
$past_key_values = null;
$history = [];
// NOTE(review): $stop_stream is never set to true anywhere in this file, so the
// early-break branch inside the streaming loop is currently dead code — confirm
// whether a signal handler was intended to flip this flag.
$stop_stream = false;
info($welcome);
while (true) {
$query = text('用户:');
// "stop" terminates the program; "clear" resets the conversation.
if (trim($query) == 'stop') {
break;
} elseif (trim($query) == 'clear') {
// Reset conversation state and clear the terminal ("\033c" = full terminal reset).
$past_key_values = null;
$history = [];
info("\033c");
info($welcome);
continue;
}
info('ChatGLM: ');
try {
// Number of characters of the (cumulative) response already printed.
$current_length = 0;
// stream_chat() appears to yield (response, history, past_key_values) tuples
// with the response text growing each step — TODO confirm against the
// model's remote code.
$rs = $model->stream_chat($tokenizer,
$query,
history: $history,
top_p: 1,
temperature: 0.01,
past_key_values: $past_key_values,
return_past_key_values: true
);
$it = PyCore::iter($rs);
echo " \e[32m"; // switch terminal text color to green for the model output
// NOTE(review): this condition assumes PyCore::next() returns a falsy value
// once the Python iterator is exhausted — confirm phpy's StopIteration behavior.
while ($next = PyCore::next($it)) {
if ($stop_stream) {
$stop_stream = false;
break;
} else {
// Convert the Python tuple to PHP values, then print only the newly
// generated suffix of the cumulative response (mb_* for multi-byte text).
list($response, $history, $past_key_values) = PyCore::scalar($next);
echo mb_substr($response, $current_length);
$current_length = mb_strlen($response);
}
}
echo "\e[39m\n"; // restore the default terminal text color
} catch (\Throwable $e) {
error($e->getMessage() ?: '执行出错了');
}
}
/**
 * Load environment variables from the .env file in BASE_PATH into the process
 * environment via putenv().
 *
 * Uses an immutable repository so variables that are already set in the real
 * environment are not overwritten, and safeLoad() so a missing .env file is
 * not an error.
 */
function load_env()
{
    $repository = RepositoryBuilder::createWithNoAdapters()
        ->addAdapter(Adapter\PutenvAdapter::class)
        ->immutable()
        ->make();
    Dotenv::create($repository, [BASE_PATH])->safeLoad();
}
运行 examples/paddlenlp/test.php 教程
10分钟快速上手飞桨:手写数字识别任务
<?php
// PaddlePaddle quick-start: MNIST handwritten-digit classification with LeNet,
// driven from PHP through the phpy extension.
ini_set('memory_limit', '2G'); // Paddle plus the in-memory dataset exceeds PHP's default limit
$paddle = PyCore::import('paddle');
$np = PyCore::import('numpy');
$Normalize = PyCore::import('paddle.vision.transforms')->Normalize;
// Normalize pixel values from [0, 255] to roughly [-1, 1].
$transform = $Normalize(mean: [127.5], std: [127.5], data_format: 'CHW');
# Download the dataset and initialise the train/test DataSets
$train_dataset = $paddle->vision->datasets->MNIST(mode: 'train', transform: $transform);
$test_dataset = $paddle->vision->datasets->MNIST(mode: 'test', transform: $transform);
# Build the network (LeNet, 10 output classes) and wrap it in the high-level Model API
$lenet = $paddle->vision->models->LeNet(num_classes: 10);
$model = $paddle->Model($lenet);
# Configure training: optimizer, loss function and evaluation metric
$model->prepare(
$paddle->optimizer->Adam(parameters: $model->parameters()),
$paddle->nn->CrossEntropyLoss(),
$paddle->metric->Accuracy()
);
# Train the model
$model->fit($train_dataset, epochs: 5, batch_size: 64, verbose: 1);
# Evaluate on the test set
$model->evaluate($test_dataset, batch_size: 64, verbose: 1);
# Save the trained parameters
$model->save('./output/mnist');
# Reload them (demonstrates the load API)
$model->load('output/mnist');
# Take one sample from the test set for a single-image inference demo
list($img, $label) = $test_dataset->__getitem__(0);
# Reshape the image from 1*28*28 to 1*1*28*28 — add a batch dimension so it
# matches the input format the model expects
$img_batch = $np->expand_dims($img->astype('float32'), axis: 0);
# predict_batch() returns a list; take the first element to get the predictions
$out = $model->predict_batch($img_batch)[0];
$pred_label = $out->argmax();
PyCore::print(PyCore::str('true label: {}, pred label: {}')->format($label->__getitem__(0), $pred_label));
# Visualise the image
$plt = PyCore::import('matplotlib.pyplot');
$plt->imshow($img->__getitem__(0));
# The container has no GUI, so write the figure to a file instead of showing it
$plt->imsave('./output/img.png', $img->__getitem__(0));
你可以基于 modelscope (推荐)、huggingface 、
|
能弄个cpu的吗,只跑验证码识别
示例代码 <?php
/**
 * Captcha/OCR recognition demo: runs ModelScope's ConvNeXt-Tiny general
 * text-recognition pipeline from PHP via the phpy extension.
 *
 * @link https://modelscope.cn/models/damo/cv_convnextTiny_ocr-recognition-general_damo/summary
 */
$pipeline = PyCore::import('modelscope.pipelines')->pipeline;
$Tasks = PyCore::import('modelscope.utils.constant')->Tasks;
$os = PyCore::import('os'); // NOTE(review): imported but never used in this snippet
// The model can be swapped for xiaolv/ocr_small
$pipe = $pipeline($Tasks->ocr_recognition, model: 'damo/cv_convnextTiny_ocr-recognition-general_damo');
$file = './captcha.png';
// Fetch a fresh captcha image over HTTP and store it locally for the pipeline.
file_put_contents($file, file_get_contents('https://business.swoole.com/page/captcha_register'));
echo '识别结果:' . $pipe($file)['text'][0], PHP_EOL; 环境 pip config set global.index-url https://mirrors.aliyun.com/pypi/simple
pip config set install.trusted-host mirrors.aliyun.com
pip install -U pip
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
pip install modelscope transformers SentencePiece opencv-python |
@he426100 示例可以加入到 phpy 的 examples 中 |
用下面这个,又快又准 |
docker-php的安装脚本 基于飞桨官方进行内安装运行后报错 容器环境是ubuntu20的 ++ dpkg-architecture --query DEB_BUILD_GNU_TYPE |
paddle-2.6.0 需要先执行 编译phpy命令需改成 phpize && \
./configure --with-python-config=/usr/bin/python3.10-config && \
make install && \
echo "extension=phpy.so" > /usr/local/etc/php/conf.d/20_phpy.ini && \
php --ri phpy |
docker-php的安装脚本 基于飞桨官方进行内安装运行后报错 容器环境是ubuntu20的 ++ dpkg-architecture --query DEB_BUILD_GNU_TYPE
嗯已安装dpkg-dev pkg-config 在测试编译谢谢 |
环境
Ubuntu 22.04.3 LTS
NVIDIA-SMI 520.61.05 Driver Version: 520.61.05 CUDA Version: 11.8
3080x2
效果
Dockerfile
The text was updated successfully, but these errors were encountered: