Clipee

Clipboard-based Actions

17 Sep 2022

Since last year, I have been managing clipboard-based actions with my own Python script instead of 3rd-party solutions, which all have limitations.

I'm using this script on a daily basis.

Workflow

Python script assigned to a key on my Streamdeck !home-office/streamdeck

  1. copy something
  2. press key on Streamdeck

The script prints to a Terminal window and copies the output back to the clipboard, ready to be pasted anywhere.

TODO:
- explore AppleScript with keyboard shortcut assignment

Initial script

from pandas.io.clipboard import clipboard_get
import re
import subprocess
from urllib.parse import urlparse
from urllib import request
from datetime import datetime

import requests
from bs4 import BeautifulSoup

d = datetime.now()

date = d.strftime('%Y%m%d-%H%M%S')

count_url = 0

def separator(count=50, lines=3, symbol='='):
    # return a visual separator block for Terminal output
    line = f"{symbol * count}" + '\n'
    return f"\n{line * lines}"

sep = separator()

### Functions

def cleanurl(url: str) -> str:
    # strip query parameters and fragments from a URL
    purl = urlparse(url)
    scheme = purl.scheme + '://' if purl.scheme else ''
    return f'{scheme}{purl.netloc}{purl.path}'


def write_to_clipboard(output):
    process = subprocess.Popen(
        'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
    process.communicate(output.encode('utf-8'))
    print(f"\nOUTPUT COPIED TO CLIPBOARD\n")


def html_for_note(text, v=True):

    url = cleanurl(text)
    if v:
        print(f"\n{url=}")

    try:

        html = request.urlopen(url).read().decode('utf8')
        if v:
            print(f"\n{html=}")

        soup = BeautifulSoup(html, "html.parser")
        if v:
            print(f"\n{soup=}")

        title = soup.title.text
        if v:
            print(f"\n{title=}")

        try:
            header = soup.find('h1').text
            if v:
                print(f"{header=}")
            output = f"<div class=\"link_border\">\n<div class=\"link_title\">{title}</div>\n<div class=\"link_title\">{header}</div>\n<div class=\"link_url\"><a href=\"{url}\" target=\"_blank\">{url}</a></div></div>\n"

        except Exception as e:
            print(f"\nheader ERROR: {e}")
            print(f"NO Header found, returning with Title only")
            output = f"<div class=\"link_border\">\n<div class=\"link_title\">{title}</div>\n<div class=\"link_url\"><a href=\"{url}\" target=\"_blank\">{url}</a></div></div>\n"

        print(f'\nOutput:\n--------\n{output}--------\n')
        write_to_clipboard(output)

    except Exception as e:
        print(f"\nhtml_for_note ERROR: {e}\nReturning empty div:")
        output = f"<div class=\"link_border\">\n<div class=\"link_title\">XXXXXXX</div>\n<div class=\"link_url\"><a href=\"{url}\" target=\"_blank\">{url}</a></div></div>\n"
        print(f'\nOutput:\n--------\n{output}--------\n')
        write_to_clipboard(output)

### Switches

def clipee_processing(text):
    global sep

    if "vimeo.com" in text:
        print(f"{sep}\nProcessing as VIMEO LINK EXTRACTION...")
        print(f'\nInput Vimeo: {type(text)}, {text}')
        src = re.search(r'(?<=src=").*?(?=[\?"])', text)
        text = src[0]
        write_to_clipboard(text)
        print(f'\nOutput: {text}\n')

    elif 'fantastical' in text:
        print(f"{sep}\nProcessing as FANTASTICAL DATE LIST...")
        o = urlparse(text)
        uid = f'{o.path}'.replace('/p/', "")
        url = f'https://hub.flexibits.com/scheduling/public/{uid}/'
        html = requests.get(url).text
        soup = BeautifulSoup(html, "html.parser")
        name = soup.find(
            'div', class_='TimePicker_timePicker__5yclN')
        count = 0
        for div in name:
            count += 1
            div_name = ''
            for span in div:
                slot = span.find_all('span')
                slot = list(slot)
                join = ": ".join(map(str, slot)).replace(
                    '<span>', "").replace('</span>', "").replace(',', "")
                if join != '':
                    paste = f'{count}) {join}'
                    print(paste)
        print()

    elif "mailto" in text:
        print(f"{sep}\nProcessing as MAILTO CLEANING...\n")
        print(f'\nInput Mailto: {type(text)}, {text}')
        text = text.replace('mailto:', '').strip()
        write_to_clipboard(text)
        print(f'\nOutput: {text}\n')

    elif "?" in text:
        print(f"{sep}\nProcessing as URL CLEANING...\n")
        print(f'\nInput URL with query params: {type(text)}, {text}')
        short_url = cleanurl(text)
        write_to_clipboard(short_url)
        print(f'\nOutput short URL: {short_url}\n')

    elif ".gif" in text:
        print(f"{sep}\nProcessing as GIF DOWNLOAD...\n")
        print(f'\nInput code with .gif: {type(text)}, {text}')
        print('Download Starting...')
        url = text
        r = requests.get(url)
        # save the GIF with the timestamp as filename
        filename = f'/Users/xxx/GIF/{date}.gif'

        with open(filename, 'wb') as output_file:
            output_file.write(r.content)
            print('Download Completed!!!')

    elif '(Event Time Zone)' in text:
        print(f"{sep}\nProcessing as TIMEZONE CLEANING...\n")
        text = text.replace(' (Event Time Zone)', '')
        write_to_clipboard(text)
        print(f'\nOutput: {text}\n')

    elif 'xxx@yyy' in text:
        print(f"{sep}\nProcessing as TERMINAL CLEANING...")
        text = text.replace('xxx@yyy', 'xxx@yyy')
        if '/Users/yyyy/' in text:
            text = text.replace('/Users/yyyy/', '/Users/xxxx/')
        print(f"\n{text}\n")
        write_to_clipboard(text)

    elif '/Users/yyyy/' in text:
        print(f"{sep}\nProcessing as TERMINAL CLEANING...")
        text = text.replace('/Users/yyyy/', '/Users/xxxx/')
        print(f"\n{text}\n")
        write_to_clipboard(text)

    elif text.strip().startswith('http'):
        print(f"{sep}\nProcessing as NOTE LINK...\n")
        html_for_note(text)

    else:
        print(f"\nNO LOGIC identified for this text.")

text = clipboard_get()
print(f"\nProcessing: {repr(text)}\n")

clipee_processing(text)

while count_url < 1000:
    count_url += 1
    print("[CLIPEE] Want to process another?...\n")
    new_url = input('Enter new URL: ')
    clipee_processing(new_url)

Clipee family

Rather than continue adding logic and switches for the script to identify what to do with the clipboard content, I broke it out into separate scripts:

  • Clipee Clean: most of the generic clipboard-content cleaning from the initial script.

And the new additions:
  • Clipee Note: generate rich HTML snippet from URL.
  • Clipee Atom: paste clipboard content to new named file in Atom.
  • Clipee Tweet: send a tweet from content of clipboard.

Clipee Clean

A pared-down version of the initial script above. The logic is unchanged, but the simplified script allows for more flexibility in cleaning tasks.
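
A minimal sketch of the shape it takes - clipboard in, cleaning switches, clipboard out (illustration only, not the actual Clipee Clean file):

from pandas.io.clipboard import clipboard_get
import subprocess
from urllib.parse import urlparse

def write_to_clipboard(output):
    # pipe the result back into the macOS clipboard
    process = subprocess.Popen(
        'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
    process.communicate(output.encode('utf-8'))

def clean(text):
    # keep only the generic cleaning switches from the initial script
    if 'mailto' in text:
        return text.replace('mailto:', '').strip()
    if '?' in text:
        purl = urlparse(text)
        scheme = purl.scheme + '://' if purl.scheme else ''
        return f'{scheme}{purl.netloc}{purl.path}'
    return text

text = clipboard_get()
cleaned = clean(text)
write_to_clipboard(cleaned)
print(f'\nOutput: {cleaned}\n')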

Clipee Note

Generate an HTML snippet with site icon and header/tagline from a URL in the clipboard.

Implemented a couple of weeks ago and used several times a day since.

Code:
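
In essence it is the html_for_note function from the initial script, split out into its own file - a minimal sketch:

from pandas.io.clipboard import clipboard_get
from urllib import request
from bs4 import BeautifulSoup
import subprocess

def write_to_clipboard(output):
    process = subprocess.Popen(
        'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
    process.communicate(output.encode('utf-8'))

url = clipboard_get().strip()
html = request.urlopen(url).read().decode('utf8')
soup = BeautifulSoup(html, "html.parser")

title = soup.title.text
h1 = soup.find('h1')
header = h1.text if h1 else ''

# same link_border / link_title / link_url markup as the initial script
output = (f'<div class="link_border">\n'
          f'<div class="link_title">{title}</div>\n'
          f'<div class="link_title">{header}</div>\n'
          f'<div class="link_url"><a href="{url}" target="_blank">{url}</a></div></div>\n')

write_to_clipboard(output)
print(output)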

06 Feb 2023: using it every day since implementation.

TODO: automate the process so any image added to my Notes images folders gets added as a hyperlink to helpers/images.md ➤ easy to copy/paste from there and/or keep as an ongoing image clipboard.
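
Not implemented yet, but the idea is roughly this (a sketch; the folder and file locations are assumptions):

from pathlib import Path

# hypothetical locations for the images folder and the helper file
IMAGES_DIR = Path.home() / 'Notes' / 'images'
IMAGES_MD = Path.home() / 'Notes' / 'helpers' / 'images.md'

# rebuild helpers/images.md with one markdown image link per file
links = [f'![{p.name}](images/{p.name})' for p in sorted(IMAGES_DIR.glob('*'))
         if p.suffix.lower() in ('.png', '.jpg', '.jpeg', '.gif')]
IMAGES_MD.write_text('\n'.join(links) + '\n', encoding='utf-8')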

Clipee Atom

For the last couple of years, I have relied daily on a shortcut on my Streamdeck that pasted the content of my clipboard into Atom (my text editor !apps/atom).

While I have a clipboard manager, this serves the purpose of keeping the pasted content on a separate screen in front of me, exploring content within the simplicity of a text editor, etc.

Challenges were:

  • an extra gesture to reach out to the Streamdeck and hit the right key (I know!!)
  • a couple of seconds of processing (managed via keyboard-shortcut automation, eg Cmd N, Cmd V) to get the result
  • an untitled file, meaning a prompt for a name when closing, or lost data in case of a crash.

As Clipee is designed to enable clipboard actions, I thought about improving on this automation.

Result now:

  • Alfred keyword pa (Paste to Atom) is all I need to do.
  • The triggered AppleScript creates a new .txt file, named with a timestamp, in a central folder, and opens it in Atom.

It's near instantaneous 😁 and all my "pasteboard" is archived, searchable and safe.

Plus it avoids the risk of misfiring if the app is not open and in the foreground quickly enough when the automated keyboard shortcuts kick in (another challenge with the initial Streamdeck approach).
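
The actual trigger is an AppleScript, but the core idea fits in a few lines of Python (a sketch; the central folder path is an assumption):

from pandas.io.clipboard import clipboard_get
from datetime import datetime
from pathlib import Path
import subprocess

# hypothetical central folder where all pasteboard files are archived
paste_dir = Path.home() / 'pasteboard'
paste_dir.mkdir(exist_ok=True)

# timestamped .txt file, so nothing is ever untitled or lost in a crash
filename = paste_dir / f"{datetime.now().strftime('%Y%m%d-%H%M%S')}.txt"
filename.write_text(clipboard_get(), encoding='utf-8')

# open the new file in Atom
subprocess.run(['open', '-a', 'Atom', str(filename)])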

See implementation and code in:

Clipee Tweet

27 Sep 2022

See origination: !helpers/twitter-cli

Goal: make it as seamless as possible in my workflow to tweet.

  • write anywhere and copy content
  • Alfred workflow with keyword ct (Clipee Tweet) tweets content of clipboard

There is a danger of tweeting something wrong 😅😂 but opening the tweet after posting for a double-check, and deleting it if there is a mistake, should be a fine workaround.

resources

implementation

Done in 30 mins and working 😁

"Trickiest" bit was the flurry of authentication strings (API KEY, API SECRET, ACCESS_TOKEN & ACCESS_TOKEN_SECRET), and their permissions.
First try returned an Unauthorized response.
Had to re-generate the keys, with a user having read/write permissions.

from pandas.io.clipboard import clipboard_get
import os

# environment variables, ie credentials
from dotenv import load_dotenv
load_dotenv()

# see resources above
import tweepy

# to open my Twitter profile after sending for double-checking
import webbrowser

# to display a confirmation window
from tkinter import simpledialog

# credentials
api_key = os.getenv("API_KEY")
api_secret = os.getenv("API_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")

# authentication of consumer key and secret
auth = tweepy.OAuthHandler(api_key, api_secret)
# authentication of access token and secret
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

def tweet(text, v=False):

    print(f"Ready to send:\n\n{text}\n\n")

    approval = simpledialog.askstring(f"Sure?",f"Ready to send:\n\n{text}\n\nReally?\n(y to confirm)")

    if approval == 'y':

        api.update_status(status=text)
        webbrowser.get('chrome').open_new_tab(f'https://twitter.com/ndeville')


text = clipboard_get()
print(f"\nProcessing: {repr(text)}\n")

tweet(text)

next: add logic to include images.

Working script 😁

# credentials
api_key = os.getenv("API_KEY")
api_secret = os.getenv("API_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")

# authentication of consumer key and secret
auth = tweepy.OAuthHandler(api_key, api_secret)
# authentication of access token and secret
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

def tweet(text, v=False):

    multimedia = False

    parts = text.split('\n')
    for part in parts:
        part = part.strip()
        print()
        print(f"{part=}")
        if part.startswith('/Users') and part.endswith('jpg'):
            multimedia = True
            image_path = part
            text = text.replace(image_path, '') # remove the path URL from the tweet text
            print(f"\n{image_path=}\n")

    if multimedia:
        approval = simpledialog.askstring(f"Sure?",f"Ready to send WITH IMAGE attached:\n\n{text}\n\nReally?\n(y to confirm)")
    else:
        approval = simpledialog.askstring(f"Sure?",f"Ready to send:\n\n{text}\n\nReally?\n(y to confirm)")

    if approval == 'y':

        if multimedia:
            api.update_status_with_media(text, image_path)
        else:
            api.update_status(status=text)

        webbrowser.get('chrome').open_new_tab(f'https://twitter.com/ndeville')

text = clipboard_get()
print(f"\nProcessing:\n{repr(text)}\n")

tweet(text)

17 Apr 2023: API access deactivated by Twitter. I would need to switch to a paid plan to continue - but I don't use it enough for that.

next: Clipee Grist

Clipee Grist

28 Sep 2022

Pain point: I come across a lot of apps (SaaS mostly) that I want to add to my list (library) to keep them on my radar. This list lives in Grist !apps/Grist.

Goal: add enriched record to Grist from a URL copied (ie in clipboard).

Separating scripts per Grist document for convenience.

29 Sep 2022

Clipee Grist Apps - Alfred shortcut cga - working with this script:

# default boilerplate removed for brevity, see https://notes.nicolasdeville/python/boilerplate/
####################
# Clipee Grist - Add Apps

### GLOBAL VARIABLES
test = False 
v = True # verbose mode

doc_id = os.getenv("DOC_ID_PE_APPS")
api_key = os.getenv("GRIST_API_KEY")

apps_data = grist_PE.Apps.fetch_table('Master')
if v:
    print(f"\n{len(apps_data)} apps in Grist.\n")

existing_domains = [x.domain for x in apps_data]

# Categories & Tags
categories_in_grist = set()
tags_in_grist = set()
for app in apps_data:
    categories = app.category
    if categories is not None:
        for cat in categories:
            if cat != 'L':
                categories_in_grist.add(cat.lower())
    tags = app.tags
    if tags is not None:
        for tag in tags:
            if tag != 'L':
                tags_in_grist.add(tag.lower())


# TODO build dicts for categories and tags to match

category_keywords = list(categories_in_grist) + [
                    # ' ai ',
                    # 'ai-',
                    ] 

tag_keywords = list(tags_in_grist) + [

] 

### 


import webbrowser

### UI
# from tkinter import simpledialog
import pymsgbox # simpler than tkinter / see python/library-pymsgbox
# import pymsgbox.native as pymsgbox # not working, revisit later

### Functions

def separator(count=50, lines=3, symbol='='):
    # return a visual separator block for Terminal output
    line = f"{symbol * count}" + '\n'
    return f"\n{line * lines}"

sep = separator()

def cleanurl(url: str) -> str:
    from urllib.parse import urlparse
    purl = urlparse(url)
    scheme = purl.scheme + '://' if purl.scheme else ''
    return f'{scheme}{purl.netloc}{purl.path}'

def domain_from_url(url):
    o = tldextract.extract(url)
    domain = f"{o.domain}.{o.suffix}".lower()
    if 'www.' in domain:
        domain = domain.replace('www.','')
    return domain

def domain_name_from_url(url):
    o = tldextract.extract(url)
    domain_name = o.domain.lower()
    if 'www.' in domain_name:
        domain_name = domain_name.replace('www.','')
    return domain_name

def add_to_grist(name, url, summary, app_type, category, tags, slug, domain):
    grist_PE.Apps.add_records('Master', [
                                    {   'name': name,
                                        'url': url,
                                        'status': 'radar',
                                        'summary': summary,
                                        'type': app_type,
                                        'category': category,
                                        'tags': tags,
                                        'slug': slug,
                                        'domain': domain,
                                        }
                                ])

def find_category(text):
    category_list = ['L']
    for keyword in category_keywords:
        if keyword.lower() in text.lower():
            category_list.append(keyword.lower())
    return category_list


def find_tags(text):
    tags_list = ['L']
    for keyword in tag_keywords:
        if keyword.lower() in text.lower():
            tags_list.append(keyword.lower())
    return tags_list

### Main

def add_app(text, v=v):
    url = cleanurl(text.strip())
    if url.startswith('http'):
        print(f"{get_linenumber()} {url=}")

        name = ''
        summary = ''
        logo = ''
        category = ['L']
        tags = ['L']

        domain = domain_from_url(url)
        slug = domain_name_from_url(url)
        app_type = 'SaaS'
        if v:
            print(f"\n{get_linenumber()} {url=}")
            print(f"{get_linenumber()} {domain=}")
            print(f"{get_linenumber()} {slug=}")
            print(f"{get_linenumber()} {app_type=}")

        if domain not in existing_domains:

            try:
                html = request.urlopen(url).read().decode('utf8')
                # if v:
                #     print(f"\n{html=}")

                try:
                    soup = BeautifulSoup(html, "html.parser")
                    # if v:
                    #     print(f"\n{soup=}")

                    try:
                        title = soup.title.text
                        if '\n' in title:
                            title = title.replace('\n', ' ').strip()
                        if v:
                            print(f"\n{get_linenumber()} {title=}")

                        try:
                            header = soup.find('h1').text
                            if '\n' in header:
                                header = header.replace('\n', ' ').strip()
                            if v:
                                print(f"{get_linenumber()} {header=}")
                            if header in title:
                                header = domain
                            if not test:
                                name = title
                                if v:
                                    print(f"{get_linenumber()} {name=}")
                                summary = header
                                if v:
                                    print(f"{get_linenumber()} {summary=}")
                                text = f"{name} {summary}"
                                category = find_category(text)
                                if v:
                                    print(f"{get_linenumber()} {category=}")
                                tags = find_tags(text)
                                if v:
                                    print(f"{get_linenumber()} {tags=}")

                        except Exception as e:
                            print(f"\n{get_linenumber()} h1 ERROR: {e}")
                            name = title


                    except Exception as e:
                        print(f"\n{get_linenumber()} title ERROR: {e}")

                except Exception as e:
                    print(f"\n{get_linenumber()} soup ERROR: {e}")

            except Exception as e:
                print(f"\n{get_linenumber()} html ERROR: {e}")

            print(f"\n{get_linenumber()} Data passed to function add_to_grist:")
            print(f"{name=}")
            print(f"{url=}")
            print(f"{summary=}")
            print(f"{app_type=}")
            print(f"{category=}")
            print(f"{tags=}")
            print(f"{slug=}")
            print(f"{domain=}\n\n")
            print(f"line {get_linenumber()} passed.")

            add_to_grist(name, url, summary, app_type, category, tags, slug, domain)

            if not test:
                webbrowser.get('chrome').open_new_tab(f'<link_to_my_grist_page>')

    else:
        print(f"\n{url} is NOT a URL.")
        pymsgbox.alert(f"\n{url} is NOT a URL.")

text = clipboard_get()
print(f"\nProcessing:\n{repr(text)}\n")

add_app(text)

run_time = round((time.time() - start_time), 1)
print(f'finished in {run_time}s.\n')

Now re-using that script logic for additional lists, eg a List of Recruiters (Clipee Grist Recruiters / Alfred shortcut cgr) for https://btobsales.eu/.
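
Only the target document/table (and the enrichment fields) change between variants - a sketch, with the Recruiters table and env variable as assumptions:

# Clipee Grist Recruiters - same boilerplate/imports as the Apps script above
doc_id = os.getenv("DOC_ID_RECRUITERS")  # hypothetical env variable

recruiters_data = grist_PE.Recruiters.fetch_table('Master')  # hypothetical table wrapper
existing_domains = [x.domain for x in recruiters_data]

def add_recruiter(name, url, domain):
    grist_PE.Recruiters.add_records('Master', [
        {'name': name, 'url': url, 'status': 'radar', 'domain': domain},
    ])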

Clipee CSV

Generate Python code for the CSV headers, given a path to a .csv file.

'''
CLIPEE CSV
CSV HEADERS TO PYTHON
Input: path to .csv file
Output: python row code for headers
'''

from pandas.io.clipboard import clipboard_get
import subprocess

import time
start_time = time.time()

import csv
import pymsgbox
import os

pymsgbox.alert(f"Starting {os.path.basename(__file__)}...")

count_col_csv = 0

def write_to_clipboard(output):
    process = subprocess.Popen(
        'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
    process.communicate(output.encode('utf-8'))
    print(f"\nOUTPUT COPIED TO CLIPBOARD\n")

def process_csv(text, v=False):
    global count_col_csv

    headers = ''

    try:

        path_to_csv = text.strip()
        if path_to_csv.startswith('/Users'):

            with open(path_to_csv, 'r', newline='', encoding='UTF-8') as h:
                reader = csv.reader(h, delimiter=",")
                header_row = list(reader)[0]

                count_for = -1

                for title in header_row:
                    count_col_csv += 1
                    count_for += 1
                    title = title.lower()
                    if ' ' in title:
                        title = title.replace(' ', '_').lower()
                    if '/' in title:
                        title = title.replace('/', '_').lower()
                    print(f"{title} = row[{count_for}]\n")

                    headers = headers + f"{title} = row[{count_for}]\n"

        write_to_clipboard(headers)

        pymsgbox.alert(f"SUCCESS\nCopied to clipboard: {headers}")

    except Exception as e:
        pymsgbox.alert(f"ERROR: {e}")

text = clipboard_get()

print(f"\nProcessing: {repr(text)}\n")

process_csv(text)
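
For example, a CSV whose header row is Name,Email Address,Company/Org ends up with this on the clipboard:

name = row[0]
email_address = row[1]
company_org = row[2]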
