def createVideoWindow(self, media_player):
    """Embed a VLC video output inside this widget's layout on macOS.

    Creates a Cocoa view container, hands the underlying NSView to the
    given VLC media player, then releases our ownership reference.
    """
    container = QtGui.QMacCocoaViewContainer(None)
    self.videoLayout.addWidget(container)
    ns_view = VLCVideoView.alloc().init()
    ns_view_id = objc.pyobjc_id(ns_view)
    container.setCocoaView(sip.voidptr(ns_view_id))
    media_player.set_nsobject(ns_view_id)
    # The container/media player now hold the view; drop our retain.
    ns_view.release()
def renderView(self):
    """Render the scene into a shared-memory-backed QImage and emit sceneRendered.

    Lazily (re)creates ``self.img``: grows the shared mmap to
    width*height*4 bytes (ARGB32) and wraps the buffer in a QImage so
    the render writes directly into shared memory.
    BUGFIX: the closing parenthesis of the ``"".join(...)`` call was
    missing, which made the function a syntax error.
    """
    if self.img is None:
        ## make sure shm is large enough and get its address
        if self.width() == 0 or self.height() == 0:
            return
        size = self.width() * self.height() * 4
        if size > self.shm.size():
            if sys.platform.startswith("win"):
                ## windows says "WindowsError: [Error 87] the parameter is incorrect" if we try to resize the mmap
                self.shm.close()
                ## it also says (sometimes) 'access is denied' if we try to reuse the tag.
                self.shmtag = "pyqtgraph_shmem_" + "".join(
                    [chr((random.getrandbits(20) % 25) + 97) for i in range(20)])
                self.shm = mmap.mmap(-1, size, self.shmtag)
            else:
                self.shm.resize(size)

        ## render the scene directly to shared memory
        if USE_PYSIDE:
            ch = ctypes.c_char.from_buffer(self.shm, 0)
            # ch = ctypes.c_char_p(address)
            self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        else:
            address = ctypes.addressof(ctypes.c_char.from_buffer(self.shm, 0))
            self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        self.img.fill(0xFFFFFFFF)
    p = QtGui.QPainter(self.img)
    self.render(p, self.viewRect(), self.rect())
    p.end()
    self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))
def on_start(self):
    """Prepare the frame buffer and FFMPEG writer for video output.

    Computes an even-sized output resolution from the scene rect, allocates
    a numpy buffer whose rows are padded to QImage's 32-bit row alignment,
    wraps it in a QImage, and opens the FFMPEG writer.
    """
    if self._write_output:
        # Start fresh: remove any previous output file.
        if self.video_out_fpath.exists():
            self.video_out_fpath.unlink()
        scene_rect = self.gs.sceneRect()
        desired_size = numpy.array((scene_rect.width(), scene_rect.height()), dtype=int)
        desired_size *= self.scale_factor
        # Non-divisible-by-two width or height causes problems for some codecs
        desired_size += desired_size % 2
        # 24-bit RGB QImage rows are padded to 32-bit chunks, which we must match
        row_stride = desired_size[0] * 3
        row_stride_unpadding = row_stride % 4
        if row_stride_unpadding > 0:
            row_stride_padding = 4 - row_stride_unpadding
            row_stride += row_stride_padding
        # If NPY_RELAXED_STRIDES_CHECKING=1 was defined when your copy of numpy was built, the following commented code would work. However, typically,
        # NPY_RELAXED_STRIDES_CHECKING=1 is not set, although the numpy manual states that it is eventually to become standard.
        # self._buffer = numpy.ndarray(
        #     shape=(desired_size[1], desired_size[0], 3),
        #     strides=(row_stride, 3, 1),
        #     dtype=numpy.uint8)
        # self._qbuffer = Qt.QImage(sip.voidptr(self._buffer.ctypes.data), desired_size[0], desired_size[1], Qt.QImage.Format_RGB888)
        # Making the buffer wide enough to accommodate any padding and feeding a view of the buffer that excludes the padded regions works everywhere.
        self._buffer = numpy.empty(row_stride * desired_size[1], dtype=numpy.uint8)
        bdr = self._buffer.reshape((desired_size[1], row_stride))
        # Strip the per-row alignment padding from the view handed to consumers.
        bdr = bdr[:, :desired_size[0]*3]
        self._buffer_data_region = bdr.reshape((desired_size[1], desired_size[0], 3))
        # QImage aliases the padded buffer directly; no copy is made.
        self._qbuffer = Qt.QImage(sip.voidptr(self._buffer.ctypes.data), desired_size[0], desired_size[1], Qt.QImage.Format_RGB888)
        # self.log_file = open(str(self._dpath / 'video.log'), 'w')
        self.ffmpeg_writer = FFMPEG_VideoWriter(str(self.video_out_fpath), desired_size, fps=self.video_fps, codec='mpeg4', preset='veryslow', bitrate='15000k')#, logfile=self.log_file)
    self._displayed_frame_idx = -1
    self.frame_index_changed.emit(self._displayed_frame_idx)
def createImage0(self):
    '''Create a QImage object, then copy data from another buffer to the
    image buffer.

    BUGFIX: the original docstring was never closed, swallowing the whole
    function body into the string literal.
    '''
    # NOTE(review): on Python 3, create_string_buffer requires bytes
    # (b'\x7F'); this str form only works on Python 2 -- confirm target.
    self.buff = ctypes.create_string_buffer('\x7F'*512*512)
    # Wrap the raw buffer address; the QImage aliases self.buff, which must
    # therefore outlive the image (hence storage on self).
    image = QtGui.QImage(sip.voidptr(ctypes.addressof(self.buff)), 512, 512,
                         QtGui.QImage.Format_Indexed8)
    image.setColorTable(self.colorTable)
    self.images.append(image)
def createVideoWindow(self, media_player):
    """Wire a freshly allocated VLC NSView into the video layout (macOS only)."""
    cocoa_container = QtGui.QMacCocoaViewContainer(None)
    self.videoLayout.addWidget(cocoa_container)
    video_view = VLCVideoView.alloc().init()
    raw_id = objc.pyobjc_id(video_view)
    cocoa_container.setCocoaView(sip.voidptr(raw_id))
    media_player.set_nsobject(raw_id)
    # Ownership transferred to the container; balance alloc/init.
    video_view.release()
def _init_images(self):
    """Pre-render the labels 'A0'..'Z9' into RGBA images stored in self.images.

    Each label is drawn with a QGraphicsScene into a numpy-backed QImage
    and appended as an Image object.

    BUGFIX: QRect has no (width, height) two-argument constructor; the
    original call raised TypeError. Use (x, y, width, height) instead.
    """
    A, = 'A'.encode('ascii')  # integer code point of 'A'
    gs = Qt.QGraphicsScene()
    sti = Qt.QGraphicsSimpleTextItem()
    sti.setFont(Qt.QFont('Courier', pointSize=24, weight=Qt.QFont.Bold))
    gs.addItem(sti)
    self.images = []
    for char in range(A, A + 26):
        for i in range(0, 10):
            text = bytes([char]).decode('ascii') + str(i)
            sti.setText(text)
            scene_rect_f = gs.itemsBoundingRect()
            # Integer pixel rect large enough to hold the rendered text.
            scene_rect = Qt.QRect(
                0, 0,
                math.ceil(scene_rect_f.width()),
                math.ceil(scene_rect_f.height()))
            gs.setSceneRect(scene_rect_f)
            buffer = numpy.empty((scene_rect.height(), scene_rect.width(), 4), dtype=numpy.uint8)
            buffer[:] = 255
            # QImage aliases the numpy buffer; buffer must outlive qimage.
            qimage = Qt.QImage(sip.voidptr(buffer.ctypes.data),
                               scene_rect.size().width(),
                               scene_rect.size().height(),
                               Qt.QImage.Format_RGBA8888)
            qpainter = Qt.QPainter()
            qpainter.begin(qimage)
            qpainter.setRenderHint(Qt.QPainter.Antialiasing)
            qpainter.setRenderHint(Qt.QPainter.HighQualityAntialiasing)
            gs.render(qpainter)
            qpainter.end()
            # Copy so the stored image owns its pixels after buffer is reused.
            self.images.append(Image(buffer.copy(), shape_is_width_height=False, name=text))
def __init__(self):
    """Build the main window: a grid layout hosting a Quit button and a
    ROOT TQtWidget canvas in which a TH1F histogram is drawn.

    Uses sip to exchange raw widget pointers between PyQt and ROOT.
    """
    # Init the main window.
    qt.QMainWindow.__init__(self)
    self.resize(350, 350)
    # Create the central widget.
    self.CentralWidget = qt.QWidget(self)
    self.setCentralWidget(self.CentralWidget)
    self.Layout = qt.QGridLayout(self.CentralWidget)
    # Create a button.
    self.QuitButton = qt.QPushButton(self.centralWidget())
    self.QuitButton.setText('Quit')
    self.Layout.addWidget(self.QuitButton, 1, 0)
    # Connect the button (old-style PyQt signal/slot syntax).
    qt.QObject.connect(self.QuitButton, qt.SIGNAL('clicked()'), self.quit)
    # Create a root histogram.
    self.hist = ROOT.TH1F("pipo","pipo", 100, 0, 100)
    # Create the main TQtWidget (using sip to get the pointer to the central widget).
    self.Address = sip.unwrapinstance(self.CentralWidget)
    # ascobject() yields a raw C pointer object that ROOT's TQtWidget accepts.
    self.Canvas = ROOT.TQtWidget(sip.voidptr(self.Address).ascobject())
    # Place the TQtWidget in the main grid layout and draw the histogram.
    # AddressOf(...)[0] goes the other way: ROOT object address -> PyQt widget.
    self.Layout.addWidget(sip.wrapinstance(ROOT.AddressOf(self.Canvas)[0],qt.QWidget), 0, 0)
    self.hist.Draw()
def renderView(self):
    """Render the scene into a shared-memory-backed ARGB32 QImage.

    BUGFIX: the ``try:`` statements preceding the two ``except TypeError:``
    handlers were lost, leaving orphaned except clauses (a syntax error).
    The fallback chain handles QImage constructor differences across
    PyQt versions.
    """
    if self.img is None:
        ## make sure shm is large enough and get its address
        if self.width() == 0 or self.height() == 0:
            return
        size = self.width() * self.height() * 4
        if size > self.shm.size():
            if sys.platform.startswith('win'):
                ## windows says "WindowsError: [Error 87] the parameter is incorrect" if we try to resize the mmap
                self.shm.close()
                ## it also says (sometimes) 'access is denied' if we try to reuse the tag.
                self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
                self.shm = mmap.mmap(-1, size, self.shmtag)
            else:
                self.shm.resize(size)

        ## render the scene directly to shared memory
        address = ctypes.addressof(ctypes.c_char.from_buffer(self.shm, 0))
        # different versions of pyqt have different requirements here..
        try:
            self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        except TypeError:
            try:
                self.img = QtGui.QImage(memoryview(buffer(self.shm)), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
            except TypeError:
                # Works on PyQt 4.9.6
                self.img = QtGui.QImage(address, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        self.img.fill(0xffffffff)
    p = QtGui.QPainter(self.img)
    self.render(p, self.viewRect(), self.rect())
    p.end()
    self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))
def renderView(self):
    """Render the scene into a shared-memory-backed ARGB32 QImage (cross-platform).

    Windows recreates the tagged mmap; macOS backs the mmap with a temp
    file; elsewhere the mmap is resized in place.

    BUGFIX: the closing ``)`` of ``''.join([...])`` and the ``try:``
    statements before the ``except TypeError:`` handlers were lost,
    making the function a syntax error.
    """
    if self.img is None:
        ## make sure shm is large enough and get its address
        if self.width() == 0 or self.height() == 0:
            return
        size = self.width() * self.height() * 4
        if size > self.shm.size():
            if sys.platform.startswith('win'):
                ## windows says "WindowsError: [Error 87] the parameter is incorrect" if we try to resize the mmap
                self.shm.close()
                ## it also says (sometimes) 'access is denied' if we try to reuse the tag.
                self.shmtag = "pyqtgraph_shmem_" + ''.join([
                    chr((random.getrandbits(20) % 25) + 97) for i in range(20)])
                self.shm = mmap.mmap(-1, size, self.shmtag)
            elif sys.platform == 'darwin':
                self.shm.close()
                self.shmFile.close()
                self.shmFile = tempfile.NamedTemporaryFile(
                    prefix='pyqtgraph_shmem_')
                self.shmFile.write(b'\x00' * (size + 1))
                self.shmFile.flush()
                self.shm = mmap.mmap(self.shmFile.fileno(), size,
                                     mmap.MAP_SHARED, mmap.PROT_WRITE)
            else:
                self.shm.resize(size)

        ## render the scene directly to shared memory
        if QT_LIB in ['PySide', 'PySide2']:
            ch = ctypes.c_char.from_buffer(self.shm, 0)
            #ch = ctypes.c_char_p(address)
            self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        else:
            address = ctypes.addressof(
                ctypes.c_char.from_buffer(self.shm, 0))
            # different versions of pyqt have different requirements here..
            try:
                self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
            except TypeError:
                try:
                    self.img = QtGui.QImage(memoryview(buffer(self.shm)), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
                except TypeError:
                    # Works on PyQt 4.9.6
                    self.img = QtGui.QImage(address, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        self.img.fill(0xffffffff)
    p = QtGui.QPainter(self.img)
    self.render(p, self.viewRect(), self.rect())
    p.end()
    self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))
def __init__(self, parent):
    """Constructor

    Set up the display window: screens, stylesheet, window flags, and on
    macOS the NSView window level / collection behavior via pyobjc.

    BUGFIX: the docstring quotes and the ``try:`` statement preceding the
    orphaned ``except:`` were lost (syntax errors); the bare ``except:``
    is narrowed to AttributeError, which is what a winId() lacking
    ``ascapsule()`` raises.
    """
    super(MainDisplay, self).__init__(parent)
    self.screens = ScreenList()
    self.rebuild_css = False
    self.hide_mode = None
    self.override = {}
    self.retranslateUi()
    self.media_object = None
    if self.is_live:
        self.audio_player = AudioPlayer(self)
    else:
        self.audio_player = None
    self.first_time = True
    self.web_loaded = True
    self.setStyleSheet(OPAQUE_STYLESHEET)
    window_flags = QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool | QtCore.Qt.WindowStaysOnTopHint
    if Settings().value('advanced/x11 bypass wm'):
        window_flags |= QtCore.Qt.X11BypassWindowManagerHint
    # TODO: The following combination of window_flags works correctly
    # on Mac OS X. For next OpenLP version we should test it on other
    # platforms. For OpenLP 2.0 keep it only for OS X to not cause any
    # regressions on other platforms.
    if is_macosx():
        window_flags = QtCore.Qt.FramelessWindowHint | QtCore.Qt.Window
    self.setWindowFlags(window_flags)
    self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    self.set_transparency(False)
    if is_macosx():
        if self.is_live:
            # Get a pointer to the underlying NSView
            try:
                nsview_pointer = self.winId().ascapsule()
            except AttributeError:
                # Older bindings return a plain int from winId(); wrap it first.
                nsview_pointer = voidptr(self.winId()).ascapsule()
            # Set PyCapsule name so pyobjc will accept it
            pythonapi.PyCapsule_SetName.restype = c_void_p
            pythonapi.PyCapsule_SetName.argtypes = [py_object, c_char_p]
            pythonapi.PyCapsule_SetName(nsview_pointer, c_char_p(b"objc.__object__"))
            # Convert the NSView pointer into a pyobjc NSView object
            self.pyobjc_nsview = objc_object(cobject=nsview_pointer)
            # Set the window level so that the MainDisplay is above the menu bar and dock
            self.pyobjc_nsview.window().setLevel_(NSMainMenuWindowLevel + 2)
            # Set the collection behavior so the window is visible when Mission Control is activated
            self.pyobjc_nsview.window().setCollectionBehavior_(NSWindowCollectionBehaviorManaged)
            if self.screens.current['primary']:
                # Connect focusWindowChanged signal so we can change the window level
                # when the display is not in focus on the primary screen
                self.application.focusWindowChanged.connect(self.change_window_level)
    if self.is_live:
        Registry().register_function('live_display_hide', self.hide_display)
        Registry().register_function('live_display_show', self.show_display)
        Registry().register_function('update_display_css', self.css_changed)
    self.close_display = False
def snap(self, no_cancel=0):
    "Snap a picture, returning a PIL image object with the results"
    mode, last_frame, (xsize, ysize), depth, bytes_per_line = self.get_parameters()
    # Both scalar-channel and color modes use the same 32-bit image format.
    if mode not in ("gray", "red", "green", "blue", "color"):
        raise ValueError('got unknown "mode" from self.get_parameters()')
    format = 32
    im = QImage(xsize, ysize, format)
    # Hand the device the raw QImage pointer to fill with pixel data.
    self.dev.snap(sip.voidptr(sip.unwrapinstance(im)), no_cancel)
    return im
def snap(self, no_cancel=0):
    "Snap a picture, returning a PIL image object with the results"
    params = self.get_parameters()
    mode, last_frame, (xsize, ysize), depth, bytes_per_line = params
    valid_modes = {'gray', 'red', 'green', 'blue', 'color'}
    if mode in valid_modes:
        format = 32  # same 32-bit format for every supported mode
    else:
        raise ValueError('got unknown "mode" from self.get_parameters()')
    im = QImage(xsize, ysize, format)
    raw_image_ptr = sip.voidptr(sip.unwrapinstance(im))
    self.dev.snap(raw_image_ptr, no_cancel)
    return im
def load_render_preview(self, filename):
    """Using OIIO, load a preview of a rendered frame into the image view.

    BUGFIX: the original docstring was never closed, swallowing the body.
    """
    pixels, w, h = utilities.get_rawpixels_from_file(filename)
    if not pixels:
        return
    # PyQt:
    import sip, ctypes
    buff = ctypes.create_string_buffer(pixels.tostring())
    # NOTE(review): sip.voidptr(buff) relies on sip accepting a ctypes buffer
    # object directly; the documented form is sip.voidptr(ctypes.addressof(buff))
    # -- confirm against the sip version in use.
    image = QImage(sip.voidptr(buff), w, h, QImage.Format_RGB888)
    pixmap = QtGui.QPixmap.fromImage(image)
    size = self.image_tab.size()
    scaled = pixmap.scaledToWidth(size.width())
    self.image_view.setPixmap(scaled)
    self.image_view.adjustSize()
def load_render_preview(self, filename):
    """Load a rendered frame via OIIO and show it scaled in the preview widget.

    BUGFIX: closes the previously unterminated docstring.
    """
    pixels, w, h = utilities.get_rawpixels_from_file(filename)
    if not pixels:
        return
    # PyQt:
    import sip, ctypes
    buff = ctypes.create_string_buffer(pixels.tostring())
    # NOTE(review): passing the ctypes buffer straight to sip.voidptr is
    # sip-version dependent; ctypes.addressof(buff) is the documented form.
    image = QImage(sip.voidptr(buff), w, h, QImage.Format_RGB888)
    pixmap = QtGui.QPixmap.fromImage(image)
    size = self.image_tab.size()
    scaled = pixmap.scaledToWidth(size.width())
    self.image_view.setPixmap(scaled)
    self.image_view.adjustSize()
def renderView(self):
    """Render the scene into a shared-memory-backed ARGB32 QImage.

    BUGFIX: the closing ``)`` of ``''.join([...])`` was lost, making the
    function a syntax error.
    """
    if self.img is None:
        ## make sure shm is large enough and get its address
        if self.width() == 0 or self.height() == 0:
            return
        size = self.width() * self.height() * 4
        if size > self.shm.size():
            if sys.platform.startswith('win'):
                ## windows says "WindowsError: [Error 87] the parameter is incorrect" if we try to resize the mmap
                self.shm.close()
                ## it also says (sometimes) 'access is denied' if we try to reuse the tag.
                self.shmtag = "pyqtgraph_shmem_" + ''.join([
                    chr((random.getrandbits(20) % 25) + 97) for i in range(20)])
                self.shm = mmap.mmap(-1, size, self.shmtag)
            else:
                self.shm.resize(size)

        ## render the scene directly to shared memory
        if USE_PYSIDE:
            ch = ctypes.c_char.from_buffer(self.shm, 0)
            #ch = ctypes.c_char_p(address)
            self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        else:
            address = ctypes.addressof(
                ctypes.c_char.from_buffer(self.shm, 0))
            self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
        self.img.fill(0xffffffff)
    p = QtGui.QPainter(self.img)
    self.render(p, self.viewRect(), self.rect())
    p.end()
    self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))
def test_macosx_display(self):
    """Test display on Mac OS X

    BUGFIX: the docstring quotes and the ``try:`` statement before the
    orphaned ``except:`` were lost (syntax errors); the bare except is
    narrowed to AttributeError (raised when winId() has no ascapsule()).
    """
    # GIVEN: A new SlideController instance on Mac OS X.
    self.screens.set_current_display(0)
    display = MagicMock()
    # WHEN: The default controller is built and a reference to the underlying NSView is stored.
    main_display = MainDisplay(display)
    try:
        nsview_pointer = main_display.winId().ascapsule()
    except AttributeError:
        nsview_pointer = voidptr(main_display.winId()).ascapsule()
    # Set PyCapsule name so pyobjc will accept it.
    pythonapi.PyCapsule_SetName.restype = c_void_p
    pythonapi.PyCapsule_SetName.argtypes = [py_object, c_char_p]
    pythonapi.PyCapsule_SetName(nsview_pointer, c_char_p(b"objc.__object__"))
    pyobjc_nsview = objc_object(cobject=nsview_pointer)
    # THEN: The window level and collection behavior should be the same as those needed for Mac OS X.
    self.assertEqual(pyobjc_nsview.window().level(), NSMainMenuWindowLevel + 2,
                     'Window level should be NSMainMenuWindowLevel + 2')
    self.assertEqual(pyobjc_nsview.window().collectionBehavior(), NSWindowCollectionBehaviorManaged,
                     'Window collection behavior should be NSWindowCollectionBehaviorManaged')
def updateImage(self):
    """Tint self.original_image with self.qcolor by mutating each scanline
    in place, then display the result in self.lbl.

    NOTE(review): ord(x)/chr(p) on scanline bytes implies Python 2 string
    semantics -- on Python 3 the buffer yields ints and chr() writes would
    fail; confirm target version.
    """
    # Group a flat byte sequence into 4-byte (one pixel) tuples of ordinals.
    def chunks(seq):
        n = 4
        for i in range(0, len(seq), n):
            yield tuple( ord(x) for x in seq[i:i + n] )
    # Flatten a sequence of tuples back into a flat stream of values.
    def unchunk(seq):
        for s in seq:
            for sn in s:
                yield sn
    oi = self.original_image.convertToFormat(QtGui.QImage.Format_ARGB32)
    size = oi.size()
    qc = self.qcolor
    for y in range(0, size.height()):
        # Wrap the raw scanline pointer so it is sliceable/assignable.
        scan = oi.scanLine(y)
        scan2 = sip.voidptr(address=int(scan), size=4 * size.width())
        # scan2 is a,r,g,b = x[1],x[2],x[3],x[0]
        # Reorder channels into (r, g, b, a) for color_blend.
        pix = [(x[2],x[1],x[0],x[3]) for x in chunks(scan2)]
        pix = color_blend(pix, (qc.red(), qc.green(), qc.blue()), None)
        # Reorder back to the in-memory ARGB32 little-endian layout.
        pix = [(x[2],x[1],x[0],x[3]) for x in pix]
        # Write blended bytes back into the image's own scanline memory.
        x = 0
        for p in unchunk(pix):
            scan2[x] = chr(p)
            x += 1
    oi = QtGui.QPixmap.fromImage( oi )
    self.lbl.setPixmap(oi)
def average(image):
    """Compute the average via the native extractor for the given QImage."""
    image_ptr = sip.voidptr(sip.unwrapinstance(image))
    return _extractor.average(image_ptr)
def nextImage(image):
    """Feed the given QImage to the native extractor's nextImage call."""
    image_ptr = sip.voidptr(sip.unwrapinstance(image))
    return _extractor.nextImage(image_ptr)
def extract(image, maxDiff, rgb, minSize):
    """Run the native extractor on the given QImage with the given thresholds."""
    image_ptr = sip.voidptr(sip.unwrapinstance(image))
    return _extractor.extract(image_ptr, maxDiff, rgb, minSize)
def read_image_file_into_qpixmap_item(cls, im_fpath, qpixmap_item, required_size=None):
    """Read an image file, normalize its intensity, and load it into
    *qpixmap_item*; raise ValueError if *required_size* is given and the
    resulting QImage does not match it.
    """
    im = cls.normalize_intensity(freeimage.read(str(im_fpath)))
    # NOTE(review): im.shape[0] is passed as QImage width and im.shape[1] as
    # height. That is only correct if freeimage.read returns arrays indexed
    # (x, y); with the usual numpy (row, col) layout these are swapped --
    # confirm the freeimage wrapper's axis convention. The QImage also
    # aliases im's memory; im must stay alive until QPixmap copies it below.
    qim = Qt.QImage(sip.voidptr(im.ctypes.data), im.shape[0], im.shape[1], Qt.QImage.Format_RGB888)
    if required_size is not None and qim.size() != required_size:
        raise ValueError('Expected {}x{} image, but "{}" is {}x{}.'.format(required_size.width(), required_size.height(), str(im_fpath), qim.size().width(), qim.size().height()))
    qpixmap_item.setPixmap(Qt.QPixmap(qim))
def extract(image, maxDiff, rgb, minSize):
    """Hand the QImage's raw pointer to the native extractor and return its result."""
    unwrapped = sip.unwrapinstance(image)
    return _extractor.extract(sip.voidptr(unwrapped), maxDiff, rgb, minSize)
def to_img_fast( img, scalar_type=None, lut=None, forceNativeLut=False): """Transform an image array into a QImage. The implementation tries to minimize data copies and casting by determining the exact flags for QImage and feeding it with the data pointer. :Returns Type: QImage import sip l_sh = len(img.shape) if l_sh == 3: vdim = img.shape[2] img = img.transpose(1,0,2) elif l_sh == 4: vdim = img.shape[3] img = img.transpose(1,0,0,2) elif l_sh == 2: vdim = 1 img = img.transpose(1,0) else: raise Exception("Unknown image shape, cannot deduce pixel format") if vdim == 1: # : We are working on scalar images, this includes argb32 encoded images if scalar_type == None: # -- make sure we always have a default lut to display indexed things -- cmax = img.max() if lut is None: lut = palette_factory("grayscale", cmax) # -- if all values fit within a uint8, cast and operate on it. # Currently doesn't work on non-square images maybe because all # data is not 32bits aligned. -- if forceNativeLut: #cmax <= 255: print "using native 8bit color map" if img.dtype != uint8 : img = uint8(img) qim = QImage(sip.voidptr(img.ctypes.data), img.shape[0], img.shape[1], QImage.Format_Indexed8) qim.setColorTable(lut.tolist()) return qim.copy() else: # -- QImages currently only allow indexing of 8bit images (up to value 255). # This disqualifies anything that has cmax > 255. However, these can be handled # on our side by converting them to RGBA and processing them as such -- # print "casting from indexed to argb32" img = lut[img] # : this creates an rgba image (len(shape)==2) return to_img(img, scalar_type="argb32") elif scalar_type=="argb32": print "using native scalar argb32" qim = QImage(sip.voidptr(img.ctypes.data), img.shape[0], img.shape[1], QImage.Format_ARGB32).copy() return qim elif vdim in [3,4] : # : We are working on vectorial things like RGB ... data = img.ctypes.data if vdim == 3: print "using native vectorial rgb888" fmt = QImage.Format_RGB888 elif vdim == 4: # ... 
or RGBA print "using native vectorial argb32" fmt = QImage.Format_ARGB32 else: raise Exception("Unhandled vectorial pixel type") qim = QImage(sip.voidptr(data), img.shape[0], img.shape[1], fmt) return qim.copy() else: raise Exception( "Arrays of shape length %s are not handled" % l_sh )
def to_img_fast(img, scalar_type=None, lut=None, forceNativeLut=False): """Transform an image array into a QImage. The implementation tries to minimize data copies and casting by determining the exact flags for QImage and feeding it with the data pointer. :Returns Type: QImage import sip l_sh = len(img.shape) if l_sh == 3: vdim = img.shape[2] img = img.transpose(1, 0, 2) elif l_sh == 4: vdim = img.shape[3] img = img.transpose(1, 0, 0, 2) elif l_sh == 2: vdim = 1 img = img.transpose(1, 0) else: raise Exception("Unknown image shape, cannot deduce pixel format") if vdim == 1: # : We are working on scalar images, this includes argb32 encoded images if scalar_type == None: # -- make sure we always have a default lut to display indexed things -- cmax = img.max() if lut is None: lut = palette_factory("grayscale", cmax) # -- if all values fit within a uint8, cast and operate on it. # Currently doesn't work on non-square images maybe because all # data is not 32bits aligned. -- if forceNativeLut: #cmax <= 255: print "using native 8bit color map" if img.dtype != uint8: img = uint8(img) qim = QImage(sip.voidptr(img.ctypes.data), img.shape[0], img.shape[1], QImage.Format_Indexed8) qim.setColorTable(lut.tolist()) return qim.copy() else: # -- QImages currently only allow indexing of 8bit images (up to value 255). # This disqualifies anything that has cmax > 255. However, these can be handled # on our side by converting them to RGBA and processing them as such -- # print "casting from indexed to argb32" img = lut[img] # : this creates an rgba image (len(shape)==2) return to_img(img, scalar_type="argb32") elif scalar_type == "argb32": print "using native scalar argb32" qim = QImage(sip.voidptr(img.ctypes.data), img.shape[0], img.shape[1], QImage.Format_ARGB32).copy() return qim elif vdim in [3, 4]: # : We are working on vectorial things like RGB ... data = img.ctypes.data if vdim == 3: print "using native vectorial rgb888" fmt = QImage.Format_RGB888 elif vdim == 4: # ... 
or RGBA print "using native vectorial argb32" fmt = QImage.Format_ARGB32 else: raise Exception("Unhandled vectorial pixel type") qim = QImage(sip.voidptr(data), img.shape[0], img.shape[1], fmt) return qim.copy() else: raise Exception("Arrays of shape length %s are not handled" % l_sh)
def nextImage(image):
    """Pass the QImage's raw pointer to the native extractor's nextImage."""
    unwrapped = sip.unwrapinstance(image)
    return _extractor.nextImage(sip.voidptr(unwrapped))
def average(image):
    """Pass the QImage's raw pointer to the native extractor's average."""
    unwrapped = sip.unwrapinstance(image)
    return _extractor.average(sip.voidptr(unwrapped))