

Object-oriented image multiprocessing

動漫人物 2023-02-22 19:10:41
I want to use multiprocessing to analyse several images in parallel with my class:

class SegmentationType(object):
    DISPLAY_NAME = "invalid"

    def __init__(self, filename, path):
        self.filename = filename
        self.path = path
        self.input_data = None
        self.output_data = None

    def read_image(self):
        self.input_data = cv2.imread(self.path + self.filename)[1]

    def write_image(self):
        cv2.imwrite(self.path + self.filename.split('.')[0] + '_' + self.DISPLAY_NAME + '.png', self.output_data)

    def process(self):
        # override in derived classes to perform an actual segmentation
        pass

    def start_pipeline(self):
        self.read_image()
        self.process()
        self.write_image()

class HSV_Segmenter(SegmentationType):
    DISPLAY_NAME = 'HSV'

    def process(self):
        source = rgb_to_hsv(self.input_data)
        self.output_data = treshold_otsu(source)

class LabSegmenter(SegmentationType):
    DISPLAY_NAME = 'LAB'

    def process(self):
        source = rgb_to_lab(self.input_data)
        self.output_data = global_threshold(source)

segmenter_class = {
    'hsv': HSV_Segmenter,
    'lab': LabSegmenter,
}.get(procedure)

if not segmenter_class:
    raise ArgumentError("Invalid segmentation method '{}'".format(procedure))

for img in images:
    os.chdir(img_dir)
    processor = segmenter_class(img, img_dir, procedure)
    processor.start_pipeline()

However, I am not sure how to call the map function:

image_lst = os.listdir(my_image_path)

# We split the list into sublists with 5 elements because of a 512 GB RAM limitation
if len(image_lst) > 4:
    nr_of_sublists = int(len(image_lst) / 2.5)
    image_sub_lst = np.array_split(image_lst, nr_of_sublists)
else:
    image_sub_lst = [image_lst]

# We do the analysis for each sublist
for sub_lst in image_sub_lst:
    print(sub_lst)
    pool = multiprocessing.Pool(8)

    # Call the processor
    processor = segmenter_class(img, img_dir, procedure)
    processor.start_pipeline()

    # How to call map???
    pool.map(?, sub_lst)
    pool.terminate()

1 Answer

慕婉清6462132


You have to create a list of (filename, path) pairs:


data = [(img, img_dir) for img in images]

Then map will run each pair in a separate process.
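To see just that mechanism in isolation, here is a minimal, self-contained sketch (the file names are placeholders): each tuple becomes one call in a worker process, and os.getpid() shows which process handled it.

import os
import multiprocessing

def show_args(args):
    # Receives one (filename, path) tuple per call.
    filename, path = args
    return 'pid {} got {} in {}'.format(os.getpid(), filename, path)

if __name__ == '__main__':
    data = [('01.png', 'C:/'), ('02.png', 'C:/'), ('03.png', 'C:/')]
    with multiprocessing.Pool(3) as pool:
        for line in pool.map(show_args, data):
            print(line)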


But you have to unpack the args inside start_pipeline:


    def start_pipeline(self, args):
        print('ok starting')

        filename, path = args
        print('filename: {}\npath: {}'.format(filename, path))

        return self.process()

You have to create an instance of the class with segmenter_class() in order to use start_pipeline:


pool.map(segmenter_class().start_pipeline, data)
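As a side note, if you would rather have start_pipeline take filename and path as two separate parameters instead of one tuple, Pool.starmap unpacks each tuple for you. A minimal sketch, assuming the signature is changed to start_pipeline(self, filename, path):

# Sketch (assumption): start_pipeline is redefined as
# def start_pipeline(self, filename, path), and starmap unpacks
# each (filename, path) tuple into those two arguments.
pool.starmap(segmenter_class().start_pipeline, data)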

BTW: in the example code I also return the result from process().


import os
import multiprocessing

class SegmentationType(object):
    DISPLAY_NAME = "invalid"

    def __init__(self):
        print('init')

    def read_image(self):
        print('read')

    def write_image(self):
        print('write')

    def process(self):
        # override in derived classes to perform an actual segmentation
        pass

    def start_pipeline(self, args):
        print('ok starting')

        filename, path = args
        print('filename: {}\npath: {}'.format(filename, path))

        return self.process()

class HSV_Segmenter(SegmentationType):
    DISPLAY_NAME = 'HSV'

    def process(self):
        print('ok HSV')
        return "result HSV"

class LabSegmenter(SegmentationType):
    DISPLAY_NAME = 'LAB'

    def process(self):
        print('ok LAB')
        return "result LAB"

if __name__ == '__main__':

    procedure = 'hsv'

    segmenter_class = {
        'hsv': HSV_Segmenter,
        'lab': LabSegmenter,
    }.get(procedure)

    images = ['01.png', '02.png', '03.png']
    img_dir = 'C:/'

    data = [(img, img_dir) for img in images]

    pool = multiprocessing.Pool(3)

    # example 1
    results = pool.map(segmenter_class().start_pipeline, data)
    print('Results:', results)

    # example 2
    for result in pool.map(segmenter_class().start_pipeline, data):
        print('result:', result)

    pool.terminate()
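Instead of calling pool.terminate() manually, Pool can also be used as a context manager (Python 3.3+), which terminates the pool when the block ends; the main part of the example then shrinks to:

    # Same map() call; the with-block terminates the pool on exit.
    with multiprocessing.Pool(3) as pool:
        results = pool.map(segmenter_class().start_pipeline, data)
        print('Results:', results)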

EDIT:


You can also create a function that takes the procedure name together with the data and then uses it with map. This way every process creates its own segmenter instance for the given procedure, and you can even send different procedures to different processes.


import os
import multiprocessing

class SegmentationType(object):
    DISPLAY_NAME = "invalid"

    def __init__(self):
        print('init')

    def read_image(self):
        print('read')

    def write_image(self):
        print('write')

    def process(self):
        # override in derived classes to perform an actual segmentation
        pass

    def start_pipeline(self, args):
        print('ok starting')

        filename, path = args
        print('filename: {}\npath: {}'.format(filename, path))

        return self.process()

class HSV_Segmenter(SegmentationType):
    DISPLAY_NAME = 'HSV'

    def process(self):
        print('ok HSV')
        return "result HSV"

class LabSegmenter(SegmentationType):
    DISPLAY_NAME = 'LAB'

    def process(self):
        print('ok LAB')
        return "result LAB"

segmenters = {
    'hsv': HSV_Segmenter,
    'lab': LabSegmenter,
}

def start_process(args):
    procedure = args[0]
    data = args[1:]

    segmenter_class = segmenters.get(procedure)
    result = segmenter_class().start_pipeline(data)

    return result

if __name__ == '__main__':

    procedure = 'hsv'

    images = ['01.png', '02.png', '03.png']
    img_dir = 'C:/'

    data = [(procedure, img, img_dir) for img in images]

    pool = multiprocessing.Pool(3)

    # example 1
    results = pool.map(start_process, data)
    print('Results:', results)

    # example 2
    for result in pool.map(start_process, data):
        print('result:', result)

    pool.terminate()

Example with different procedures:


if __name__ == '__main__':

    images = ['01.png', '02.png', '03.png']
    img_dir = 'C:/'

    pool = multiprocessing.Pool(3)

    data = [('hsv', img, img_dir) for img in images]
    results = pool.map(start_process, data)
    print('Results HSV:', results)

    data = [('lab', img, img_dir) for img in images]
    results = pool.map(start_process, data)
    print('Results LAB:', results)

    pool.terminate()

And the same with a single map(): there are 6 jobs to start, but with Pool(3) only 3 processes run at the same time; whenever a process becomes free, map takes the next value from the list and runs it.


if __name__ == '__main__':

    images = ['01.png', '02.png', '03.png']
    img_dir = 'C:/'

    data_hsv = [('hsv', img, img_dir) for img in images]
    data_lab = [('lab', img, img_dir) for img in images]

    data = data_hsv + data_lab

    pool = multiprocessing.Pool(3)

    # example 1
    results = pool.map(start_process, data)
    print('Results:', results)

    # example 2
    for result in pool.map(start_process, data):
        print('results:', result)

    pool.terminate()
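If you want to handle each result as soon as its worker finishes instead of waiting for the whole list, imap_unordered yields results in completion order; a minimal sketch with the same data:

    # Sketch: imap_unordered yields results as workers finish,
    # not in the order of the input list.
    pool = multiprocessing.Pool(3)
    for result in pool.imap_unordered(start_process, data):
        print('finished:', result)
    pool.terminate()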

EDIT:

It also works with Ray.

It only needs

from ray.util import multiprocessing

instead of

import multiprocessing

I didn't test it with Dask, PySpark or Joblib.
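For completeness, a minimal sketch of that drop-in swap (assuming ray is installed and start_process is defined as above):

# Sketch: only the import changes; ray.util.multiprocessing.Pool
# mirrors the standard multiprocessing.Pool API.
from ray.util import multiprocessing

if __name__ == '__main__':
    data = [('hsv', '01.png', 'C:/'), ('lab', '02.png', 'C:/')]
    pool = multiprocessing.Pool(3)
    results = pool.map(start_process, data)
    print('Results:', results)
    pool.terminate()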


EDIT:

Joblib example:

from joblib import Parallel, delayed

class SegmentationType(object):
    DISPLAY_NAME = "invalid"

    def __init__(self):
        print('init')

    def read_image(self):
        print('read')

    def write_image(self):
        print('write')

    def process(self):
        # override in derived classes to perform an actual segmentation
        pass

    def start_pipeline(self, args):
        print('ok starting')

        filename, path = args
        print('filename: {}\npath: {}'.format(filename, path))

        return self.process()

class HSV_Segmenter(SegmentationType):
    DISPLAY_NAME = 'HSV'

    def process(self):
        print('ok HSV')
        return "result HSV"

class LabSegmenter(SegmentationType):
    DISPLAY_NAME = 'LAB'

    def process(self):
        print('ok LAB')
        return "result LAB"

segmenters = {
    'hsv': HSV_Segmenter,
    'lab': LabSegmenter,
}

def start_process(args):
    procedure = args[0]
    data = args[1:]

    segmenter_class = segmenters.get(procedure)
    result = segmenter_class().start_pipeline(data)

    return result

if __name__ == '__main__':

    images = ['01.png', '02.png', '03.png']
    img_dir = 'C:/'

    data_hsv = [('hsv', img, img_dir) for img in images]
    data_lab = [('lab', img, img_dir) for img in images]

    data = data_hsv + data_lab

    # --- version 1 ---

    #pool = Parallel(n_jobs=3, backend='threading')
    #pool = Parallel(n_jobs=3, backend='multiprocessing')
    pool = Parallel(n_jobs=3)

    # example 1
    results = pool( delayed(start_process)(args) for args in data )
    print('Results:', results)

    # example 2
    for result in pool( delayed(start_process)(args) for args in data ):
        print('result:', result)

    # --- version 2 ---

    #with Parallel(n_jobs=3, backend='threading') as pool:
    #with Parallel(n_jobs=3, backend='multiprocessing') as pool:
    with Parallel(n_jobs=3) as pool:

        # example 1
        results = pool( delayed(start_process)(args) for args in data )
        print('Results:', results)

        # example 2
        for result in pool( delayed(start_process)(args) for args in data ):
            print('result:', result)
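One note on the backend choice: when no backend is given, recent joblib versions default to the process-based 'loky' backend; 'threading' mainly pays off when the work releases the GIL. n_jobs=-1 uses all available cores. A sketch:

    # Sketch: n_jobs=-1 means "use all CPUs"; with no backend given,
    # recent joblib versions default to the process-based 'loky'.
    results = Parallel(n_jobs=-1)(
        delayed(start_process)(args) for args in data
    )
    print('Results:', results)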

